From bcfaff97f9e8c25e10a5076d13bd36582c7e2f5e Mon Sep 17 00:00:00 2001
From: jinyaohui
Date: Mon, 18 May 2020 10:31:46 +0800
Subject: [PATCH] clean pylint

---
 .../apps/bert_attention_submodules.py | 6 +-
 .../apps/test_bert_check_gradient.py | 8 +-
 .../apps/test_bert_compare_with_npy.py | 17 +-
 .../apps/test_bert_ops_check_gradient.py | 1 +
 .../apps/test_check_exception.py | 3 +
 .../apps/test_lamb_check_loss.py | 1 +
 .../apps/test_model_loss.py | 1 +
 .../apps/test_no_facade.py | 2 +
 ...test_reid_gradient_compare_with_pytorch.py | 1 +
 .../components/executor/check_exceptions.py | 2 +
 .../check_gradient_for_scalar_func.py | 2 +
 .../executor/check_gradient_wrt_inputs.py | 2 +
 .../executor/check_gradient_wrt_params.py | 2 +
 .../check_jacobian_for_scalar_func.py | 2 +
 .../executor/check_jacobian_wrt_inputs.py | 2 +
 .../executor/check_jacobian_wrt_params.py | 2 +
 .../executor/exec_and_verify_model_loss.py | 2 +
 .../components/executor/exec_forward.py | 2 +
 .../components/executor/exec_gradient.py | 2 +
 ...sian_product_on_group_for_expect_result.py | 2 +
 ...rtesian_product_on_id_for_expect_result.py | 2 +
 .../components/facade/me_facade.py | 2 +
 .../components/function/compile_block.py | 2 +
 .../function/compile_gradient_wrt_inputs.py | 2 +
 .../function/compile_gradient_wrt_params.py | 2 +
 .../function/get_function_from_config.py | 2 +
 .../init_params_with_rand_and_run_block.py | 2 +
 ...s_with_rand_and_run_gradient_wrt_inputs.py | 1 +
 ...s_with_rand_and_run_gradient_wrt_params.py | 1 +
 .../components/function/run_block.py | 2 +
 .../function/run_gradient_wrt_inputs.py | 1 +
 .../function/run_gradient_wrt_params.py | 1 +
 ...an_product_on_group_for_function_inputs.py | 1 +
 ...esian_product_on_id_for_function_inputs.py | 2 +
 .../components/icomponent.py | 9 +
 .../generate_dataset_for_linear_regression.py | 2 +
 .../inputs/generate_inputs_from_shape.py | 2 +
 .../inputs/get_inputs_from_config.py | 2 +
 .../components/inputs/load_inputs_from_npy.py | 2 +
 .../components/verifier/compare_forward.py | 2 +
 .../components/verifier/compare_gradient.py | 2 +
 .../verifier/verify_expect_from_npy.py | 2 +
 .../components/verifier/verify_shapetype.py | 2 +
 .../mindspore_test.py | 6 +-
 .../pipeline/forward/compare_forward.py | 4 +-
 .../pipeline/gradient/compare_gradient.py | 10 +-
 .../pipeline/gradient/compile_gradient.py | 4 +-
 .../utils/block_util.py | 22 ++
 .../utils/bprop_util.py | 5 +
 .../utils/check_gradient.py | 5 +-
 .../utils/compare_util.py | 1 +
 .../utils/config_util.py | 4 +
 .../utils/dataset_util.py | 1 +
 .../utils/debug_util.py | 6 +-
 .../utils/facade_util.py | 6 +-
 .../mindspore_test_framework/utils/keyword.py | 3 +
 .../utils/model_util.py | 11 +
 .../utils/npy_util.py | 2 +
 .../utils/other_util.py | 7 +-
 .../utils/verifier_util.py | 10 +-
 tests/ops_common.py | 19 +-
 tests/st/auto_parallel/test_expand_loss.py | 2 +-
 .../test_model_parallel_onehot.py | 2 +-
 tests/st/control/test_cont_break.py | 15 +-
 tests/st/gnn/aggregator.py | 8 +-
 tests/st/gnn/test_gnn_aggregator.py | 1 +
 tests/st/nccl/test_nccl_all.py | 8 +-
 tests/st/nccl/test_nccl_lenet.py | 4 +-
 tests/st/nccl/test_nccl_reduce_scatter_op.py | 1 +
 .../models/bert/bert_tdt_lossscale.py | 21 +-
 tests/st/networks/test_gpu_alexnet.py | 4 +-
 tests/st/networks/test_gpu_lstm.py | 9 +-
 tests/st/networks/test_gpu_resnet.py | 9 +-
 tests/st/ops/cpu/test_argmax_op.py | 10 +-
 tests/st/ops/cpu/test_bias_add.py | 20 +-
 tests/st/ops/cpu/test_bias_add_grad.py | 16 +-
 .../ops/cpu/test_conv2d_backprop_filter_op.py | 45 ++--
 .../ops/cpu/test_conv2d_backprop_input_op.py | 41 ++--
 tests/st/ops/cpu/test_conv2d_op.py | 10 +-
 tests/st/ops/cpu/test_equalcount_op.py | 9 +-
 tests/st/ops/cpu/test_maxpool_grad_op.py | 3 +-
 tests/st/ops/cpu/test_maxpool_op.py | 5 +
 tests/st/ops/cpu/test_momentum_op.py | 8 +-
 tests/st/ops/cpu/test_mul_op.py | 30 +--
 tests/st/ops/cpu/test_relu_grad_op.py | 5 +-
 tests/st/ops/cpu/test_relu_op.py | 9 +-
 tests/st/ops/cpu/test_softmax_op.py | 7 +-
 .../cpu/test_softmax_with_cross_entropy_op.py | 21 +-
 tests/st/ops/custom_ops_tbe/conv2d.py | 5 +-
 tests/st/ops/custom_ops_tbe/conv_layer.py | 33 +--
 .../st/ops/custom_ops_tbe/conv_layer_fast.py | 9 +-
 tests/st/ops/custom_ops_tbe/cus_conv2d.py | 8 +-
 tests/st/ops/custom_ops_tbe/cus_square.py | 1 +
 tests/st/ops/custom_ops_tbe/test_cus_conv.py | 21 +-
 tests/st/ops/custom_ops_tbe/test_square.py | 7 +-
 tests/st/ops/gpu/test_addn_op.py | 1 +
 tests/st/ops/gpu/test_argmax_op.py | 20 +-
 tests/st/ops/gpu/test_assign_add_op.py | 24 +-
 tests/st/ops/gpu/test_assign_op.py | 2 +
 tests/st/ops/gpu/test_batch_matmul.py | 86 +++----
 tests/st/ops/gpu/test_batchnorm_op.py | 1 +
 tests/st/ops/gpu/test_broadcast_op.py | 5 +-
 tests/st/ops/gpu/test_concatv2_op.py | 7 +-
 .../ops/gpu/test_conv2d_backprop_filter_op.py | 1 +
 .../st/ops/gpu/test_correction_mul_grad_op.py | 1 -
 tests/st/ops/gpu/test_equalcount_op.py | 2 +
 tests/st/ops/gpu/test_exp_op.py | 1 +
 tests/st/ops/gpu/test_float_status_op.py | 11 +-
 tests/st/ops/gpu/test_gelu_grad_op.py | 12 +-
 tests/st/ops/gpu/test_gelu_op.py | 4 +
 tests/st/ops/gpu/test_layer_norm_grad_op.py | 22 +-
 tests/st/ops/gpu/test_layer_norm_op.py | 12 +-
 tests/st/ops/gpu/test_lessequal_op.py | 1 -
 tests/st/ops/gpu/test_logical_op.py | 7 +-
 tests/st/ops/gpu/test_logsoftmax_op.py | 104 +++++---
 tests/st/ops/gpu/test_maximum_op.py | 223 +++++++++---------
 tests/st/ops/gpu/test_minimum_op.py | 221 +++++++++--------
 tests/st/ops/gpu/test_momentum_op.py | 3 +
 tests/st/ops/gpu/test_realdiv_op.py | 1 +
 tests/st/ops/gpu/test_reciprocal_op.py | 1 +
 tests/st/ops/gpu/test_select_op.py | 2 +
 tests/st/ops/gpu/test_slice.py | 1 +
 ...st_softmax_cross_entropy_with_logits_op.py | 15 +-
 tests/st/ops/gpu/test_softmax_op.py | 196 +++++++--------
 ...se_softmax_cross_entropy_with_logits_op.py | 11 +-
 tests/st/ops/gpu/test_sqrt_op.py | 1 -
 tests/st/ops/gpu/test_tanh_op.py | 25 +-
 tests/st/ops/gpu/test_tensoradd.py | 2 +
 tests/st/ops/gpu/test_transpose_op.py | 1 +
 tests/st/ops/gpu/test_unsorted_segment_sum.py | 81 +++----
 tests/st/ops/gpu/test_zeroslike_op.py | 2 +
 tests/st/ops/test_rmsprop.py | 19 +-
 tests/st/pynative/test_ascend_lenet.py | 2 +-
 tests/st/summary/test_davinci_summary.py | 2 +-
 tests/st/summary/test_gpu_summary.py | 3 -
 .../python_input/gtest_input/ir/clone_test.py | 16 ++
 .../gtest_input/ir/manager_test.py | 15 ++
 .../gtest_input/mem_reuse/mem_reuse_test.py | 1 +
 .../gtest_input/optimizer/ad/ad_test.py | 56 ++++-
 .../gtest_input/optimizer/cconv_test.py | 53 ++++-
 .../gtest_input/optimizer/clean_test.py | 4 +-
 .../gtest_input/optimizer/opt_test.py | 136 +++++++++++
 .../gtest_input/pipeline/infer/infer_test.py | 1 +
 .../pipeline/infer/primitive_test.py | 21 ++
 .../gtest_input/pipeline/parse/parse_class.py | 4 +-
 .../pipeline/parse/parse_compile.py | 4 +-
 .../pipeline/parse/parse_primitive.py | 9 +-
 .../pipeline/parse/parser_integrate.py | 26 +-
 .../gtest_input/pipeline/parse/parser_test.py | 81 +++++--
 .../pre_activate/batch_norm_grad_split.py | 2 +
 .../pre_activate/batchnorm_to_bninfer.py | 2 +
 .../batchnormgrad_to_bninfergrad.py | 2 +
 .../pre_activate/confusion_mul_grad_fusion.py | 2 +
 .../gtest_input/pre_activate/derelu_fusion.py | 2 +
 .../eliminate_redundant_op_test.py | 1 +
 .../matmul_biasadd_fusion_test.py | 1 +
 .../momentum_lossscale_fusion_test.py | 1 +
 .../pre_activate/mul_add_fusion_test.py | 2 +
 .../reshape_transpose_fusion_test.py | 1 +
 .../pre_activate/topk_split_test.py | 1 +
 .../transpose_reshape_fusion_test.py | 1 +
 .../transpose_transdata_fusion_test.py | 1 +
 .../gtest_input/pynative/ops_test.py | 14 +-
 .../gtest_input/session/session_test.py | 3 +-
 .../gtest_input/transform/multi_relu_case.py | 5 +
 .../gtest_input/utils/graph_utils_test.py | 4 +
 .../python_input/gtest_input/vm/vm_test.py | 4 +
 .../data/dataset/testPyfuncMap/pyfuncmap.py | 23 +-
 tests/ut/python/communication/__init__.py | 3 +-
 tests/ut/python/communication/test_comm.py | 18 ++
 .../communication/test_data_parallel_dense.py | 1 +
 .../communication/test_data_parallel_lenet.py | 3 +
 .../communication/test_management_api.py | 29 ++-
 tests/ut/python/conftest.py | 2 +
 tests/ut/python/dataset/test_2ops.py | 3 +-
 tests/ut/python/dataset/test_Tensor.py | 2 +
 tests/ut/python/dataset/test_apply.py | 24 +-
 tests/ut/python/dataset/test_autocontrast.py | 63 +++--
 tests/ut/python/dataset/test_batch.py | 3 +-
 tests/ut/python/dataset/test_concat.py | 5 +-
 tests/ut/python/dataset/test_config.py | 14 +-
 tests/ut/python/dataset/test_cut_out.py | 1 -
 .../python/dataset/test_datasets_imagenet.py | 2 -
 .../test_datasets_imagenet_distribution.py | 3 +
 .../python/dataset/test_datasets_sharding.py | 13 +-
 .../dataset/test_datasets_textfileop.py | 26 +-
 tests/ut/python/dataset/test_datasets_voc.py | 32 ++-
 tests/ut/python/dataset/test_decode.py | 1 -
 tests/ut/python/dataset/test_deviceop_cpu.py | 1 +
 tests/ut/python/dataset/test_equalize.py | 63 +++--
 tests/ut/python/dataset/test_exceptions.py | 2 +-
 tests/ut/python/dataset/test_filterop.py | 148 +++++++-----
 tests/ut/python/dataset/test_generator.py | 16 +-
 tests/ut/python/dataset/test_invert.py | 62 ++---
 tests/ut/python/dataset/test_iterator.py | 2 +-
 tests/ut/python/dataset/test_minddataset.py | 42 +++-
 .../dataset/test_minddataset_exception.py | 16 +-
 .../dataset/test_minddataset_multi_images.py | 2 +-
 ...st_minddataset_multi_images_and_ndarray.py | 17 +-
 .../dataset/test_minddataset_sampler.py | 9 +-
 .../dataset/test_mixup_label_smoothing.py | 20 +-
 tests/ut/python/dataset/test_pad.py | 8 +-
 tests/ut/python/dataset/test_project.py | 1 -
 tests/ut/python/dataset/test_pyfunc.py | 18 +-
 tests/ut/python/dataset/test_random_color.py | 60 ++---
 .../dataset/test_random_color_adjust.py | 9 +-
 tests/ut/python/dataset/test_random_crop.py | 1 -
 .../ut/python/dataset/test_random_dataset.py | 12 +-
 .../python/dataset/test_random_sharpness.py | 60 ++---
 tests/ut/python/dataset/test_rename.py | 4 +-
 tests/ut/python/dataset/test_rgb_hsv.py | 2 +-
 tests/ut/python/dataset/test_sampler.py | 4 +-
 .../ut/python/dataset/test_serdes_dataset.py | 11 +-
 tests/ut/python/dataset/test_shuffle.py | 2 +-
 tests/ut/python/dataset/test_skip.py | 8 +-
 tests/ut/python/dataset/test_sync_wait.py | 22 +-
 tests/ut/python/dataset/test_take.py | 8 +-
 tests/ut/python/dataset/test_tensor_string.py | 2 +-
 tests/ut/python/dataset/test_tfreader_op.py | 5 +-
 tests/ut/python/dataset/test_type_cast.py | 2 +-
 .../ut/python/dataset/test_uniform_augment.py | 72 +++---
 tests/ut/python/dataset/test_var_batch_map.py | 6 +-
 tests/ut/python/dataset/test_zip.py | 2 -
 tests/ut/python/dataset/util.py | 2 +-
 tests/ut/python/hccl_test/manage/api.py | 6 +
 tests/ut/python/mindrecord/skip_test_issue.py | 56 ++++-
 .../skip_test_mindrecord_internal.py | 4 +-
 .../mindrecord/skip_test_mindrecord_shard.py | 11 +
 .../mindrecord/test_cifar100_to_mindrecord.py | 8 +
 .../mindrecord/test_cifar10_to_mindrecord.py | 9 +
 .../mindrecord/test_imagenet_to_mindrecord.py | 6 +
 .../python/mindrecord/test_mindrecord_base.py | 46 +++-
 .../mindrecord/test_mindrecord_exception.py | 53 +++--
 .../test_mindrecord_multi_images.py | 14 +-
 tests/ut/python/mindrecord/utils.py | 17 +-
 tests/ut/python/onnx/test_onnx.py | 18 +-
 tests/ut/python/ops/__init__.py | 1 +
 tests/ut/python/ops/test_array_ops.py | 8 +-
 tests/ut/python/ops/test_array_ops_check.py | 8 +-
 tests/ut/python/ops/test_bprop_disorder.py | 6 +-
 tests/ut/python/ops/test_control_ops.py | 15 +-
 tests/ut/python/ops/test_list.py | 1 +
 tests/ut/python/ops/test_math_ops.py | 3 +
 tests/ut/python/ops/test_math_ops_check.py | 76 +++---
 tests/ut/python/ops/test_momentum.py | 6 +
 tests/ut/python/ops/test_multitype_ops.py | 8 +
 tests/ut/python/ops/test_nn_ops.py | 4 +-
 tests/ut/python/ops/test_nn_ops_check.py | 91 +++----
 tests/ut/python/ops/test_ops.py | 5 +-
 tests/ut/python/ops/test_ops_check.py | 7 +
 tests/ut/python/ops/test_ops_reid.py | 33 +--
 tests/ut/python/ops/test_python_operators.py | 2 +-
 tests/ut/python/ops/test_tensor_slice.py | 66 +++---
 tests/ut/python/ops/test_tuple.py | 3 +
 tests/ut/python/ops/test_tuple_slice.py | 12 +-
 tests/ut/python/parallel/conftest.py | 1 +
 .../add_relu/_test_add_relu_parallel_4p.py | 54 +++--
 .../_test_conv2d_parallel_4p.py | 97 ++++----
 .../dropout/_test_dropout_parallel_4p.py | 49 ++--
 .../hcom/_test_allgather_4p.py | 84 ++++---
 .../hcom/_test_allreduce_4p.py | 104 ++++----
 .../_test_l2normalize_parallel_4p.py | 77 +++---
 .../loss/_test_loss_parallel_4p.py | 78 +++---
 .../matmul/_test_matmul_parallel_4p.py | 125 +++++-----
 .../max/_test_max_parallel_4p.py | 70 ++++--
 .../need_fix_test_mul_softmax_parallel_4p.py | 73 +++---
 .../onehot/_test_onehot_parallel_4p.py | 45 ++--
 .../prelu/_test_prelu_parallel_4p.py | 86 ++++---
 .../reshape/_test_reshape_parallel_4p.py | 69 +++---
 .../transpose/_test_transpose_parallel_4p.py | 97 ++++----
 .../parallel/test_add_relu_redistribution.py | 14 +-
 .../python/parallel/test_allreduce_fusion.py | 21 +-
 tests/ut/python/parallel/test_alltoall.py | 15 +-
 tests/ut/python/parallel/test_arithmetic.py | 21 +-
 .../parallel/test_auto_parallel_BN_PReLU.py | 9 +-
 .../parallel/test_auto_parallel_arithmetic.py | 6 +-
 ...t_auto_parallel_assign_sub_with_ref_key.py | 10 +-
 .../parallel/test_auto_parallel_cast.py | 7 +-
 .../test_auto_parallel_common_parameter.py | 6 +-
 .../test_auto_parallel_double_star.py | 4 +
 .../test_auto_parallel_double_subgraphs.py | 6 +
 .../parallel/test_auto_parallel_fc_nobias.py | 4 +
 .../test_auto_parallel_four_matmul.py | 7 +-
 .../parallel/test_auto_parallel_inference.py | 4 +-
 .../test_auto_parallel_l2normalize.py | 1 -
 .../test_auto_parallel_matmul_prelu.py | 6 +-
 .../parallel/test_auto_parallel_onehot.py | 4 +-
 .../test_auto_parallel_parameter_cast.py | 8 +-
 .../test_auto_parallel_partial_strategy.py | 4 +
 .../test_auto_parallel_reduce_method.py | 2 +
 .../parallel/test_auto_parallel_reshape.py | 37 +--
 .../parallel/test_auto_parallel_rhombus.py | 4 +
 .../test_auto_parallel_softmax_loss.py | 1 +
 .../test_auto_parallel_transformer.py | 7 +-
 .../parallel/test_auto_parallel_transpose.py | 2 +
 .../test_auto_parallel_tuple_depend.py | 3 +-
 .../parallel/test_auto_parallel_two_bn.py | 4 +
 .../parallel/test_auto_parallel_two_matmul.py | 11 +-
 .../test_auto_parallel_two_partial_matmul.py | 6 +-
 .../parallel/test_auto_parallel_zig_zag.py | 4 +
 .../parallel/test_auto_star_elimination.py | 6 +-
 tests/ut/python/parallel/test_batch_matmul.py | 2 +-
 .../ut/python/parallel/test_batch_parallel.py | 1 -
 .../parallel/test_batch_parallel_tensoradd.py | 3 +-
 .../parallel/test_batchnorm_batch_parallel.py | 9 +-
 .../ut/python/parallel/test_bn_prelu_cell.py | 12 +-
 tests/ut/python/parallel/test_bool_grad.py | 8 +-
 .../python/parallel/test_combined_tensor.py | 21 +-
 .../parallel/test_comparison_function_info.py | 4 +-
 tests/ut/python/parallel/test_dataset.py | 1 -
 .../python/parallel/test_dataset_interface.py | 12 +-
 tests/ut/python/parallel/test_dataset_util.py | 18 +-
 .../test_different_type_for_div_op.py | 12 +-
 .../parallel/test_element_wise_function.py | 19 +-
 tests/ut/python/parallel/test_expand_dims.py | 12 +-
 .../ut/python/parallel/test_forward_graph.py | 11 +-
 tests/ut/python/parallel/test_gather_v2.py | 24 +-
 .../parallel/test_gather_v2_primitive.py | 13 +-
 tests/ut/python/parallel/test_get_next.py | 41 ++--
 .../parallel/test_get_parameter_layout.py | 5 +-
 .../test_hybird_parallel_activation.py | 28 ++-
 tests/ut/python/parallel/test_l2normalize.py | 3 +-
 tests/ut/python/parallel/test_layer_norm.py | 2 +-
 tests/ut/python/parallel/test_linear.py | 6 +-
 .../python/parallel/test_loop_two_matmul.py | 1 -
 .../parallel/test_loss_and_optimizer.py | 23 +-
 .../ut/python/parallel/test_matmul_dropout.py | 2 +-
 .../ut/python/parallel/test_matmul_tensor.py | 10 +-
 .../test_mix_precision_hybrid_parallel.py | 2 +-
 tests/ut/python/parallel/test_neg.py | 11 +-
 tests/ut/python/parallel/test_one_dev.py | 3 +-
 tests/ut/python/parallel/test_one_hot_net.py | 39 ++-
 .../parallel/test_one_weight_parameter.py | 5 +-
 tests/ut/python/parallel/test_onehot.py | 19 +-
 .../parallel/test_operator_model_parallel.py | 10 +-
 tests/ut/python/parallel/test_optimizer.py | 1 -
 .../parallel/test_optimizer_clone_weight.py | 12 +-
 .../ut/python/parallel/test_parameter_init.py | 2 +-
 tests/ut/python/parallel/test_prelu.py | 42 ++--
 tests/ut/python/parallel/test_prelu_cell.py | 9 +-
 .../parallel/test_reduce_method_info.py | 53 ++---
 tests/ut/python/parallel/test_reshape.py | 50 ++--
 tests/ut/python/parallel/test_scalar_loss.py | 8 +-
 .../parallel/test_semi_auto_two_subgraphs.py | 2 +-
 .../test_set_auto_parallel_context.py | 1 +
 .../test_sigmoid_cross_entropy_with_logits.py | 2 +-
 .../test_softmax_cross_entropy_expand.py | 1 +
 .../test_softmax_cross_entropy_loss.py | 5 +-
 .../python/parallel/test_split_grad_sens.py | 10 +-
 tests/ut/python/parallel/test_square.py | 10 +-
 tests/ut/python/parallel/test_squeeze_info.py | 10 +-
 .../parallel/test_strategy_checkpoint.py | 9 +-
 tests/ut/python/parallel/test_sum_as_loss.py | 6 +-
 tests/ut/python/parallel/test_transpose.py | 13 +-
 tests/ut/python/parallel/test_two_matmul.py | 2 +-
 .../parallel/test_two_weights_parameter.py | 6 +-
 .../test_using_seed_for_initializer.py | 7 +-
 .../parallel/test_virtual_dataset_3_input.py | 3 +-
 .../infer/test_hypermap_specialize.py | 4 +-
 .../python/pipeline/infer/test_net_infer.py | 1 +
 .../pipeline/infer/test_scalar_add_grad.py | 12 +
 .../ut/python/pipeline/parse/test_celllist.py | 2 +
 .../ut/python/pipeline/parse/test_compile.py | 2 +
 .../python/pipeline/parse/test_cont_break.py | 21 +-
 .../python/pipeline/parse/test_create_obj.py | 4 +
 .../ut/python/pipeline/parse/test_fix_bug.py | 3 +
 .../ut/python/pipeline/parse/test_for_stmt.py | 1 +
 .../parse/test_graph_return_const_param.py | 1 +
 .../ut/python/pipeline/parse/test_operator.py | 2 +
 tests/ut/python/pipeline/parse/test_parse.py | 9 +-
 .../python/pipeline/parse/test_serialize.py | 1 +
 tests/ut/python/pynative_mode/__init__.py | 1 +
 .../pynative_mode/engine/test_cell_wrapper.py | 1 +
 .../ge/model/test_lenet_model.py | 1 +
 .../pynative_mode/ge/ops/test_batchnorm.py | 10 +-
 .../python/pynative_mode/ge/ops/test_conv.py | 4 +-
 .../pynative_mode/ge/ops/test_tensor_add.py | 1 +
 .../python/pynative_mode/nn/test_batchnorm.py | 2 +-
 tests/ut/python/pynative_mode/nn/test_cell.py | 10 +-
 .../pynative_mode/nn/test_checkparameter.py | 1 -
 .../python/pynative_mode/nn/test_container.py | 1 -
 tests/ut/python/pynative_mode/nn/test_conv.py | 1 -
 .../ut/python/pynative_mode/nn/test_dense.py | 4 +-
 .../python/pynative_mode/nn/test_dropout.py | 1 +
 tests/ut/python/pynative_mode/nn/test_loss.py | 1 +
 .../python/pynative_mode/nn/test_pooling.py | 8 +-
 .../python/pynative_mode/ops/test_hypermap.py | 1 +
 .../pynative_mode/ops/test_multitype.py | 1 -
 tests/ut/python/pynative_mode/test_bprop.py | 9 +
 .../python/pynative_mode/test_cell_bprop.py | 75 +++++-
 .../python/pynative_mode/test_framstruct.py | 204 +++++++++++++---
 .../pynative_mode/test_high_order_grad.py | 4 +-
 .../pynative_mode/test_insert_grad_of.py | 25 ++
 .../pynative_mode/test_multigraph_sink.py | 2 +-
 .../python/pynative_mode/test_parse_method.py | 7 +
 .../pynative_mode/test_pynative_model.py | 7 +
 .../test_remove_unnecessary_phi.py | 4 +
 tests/ut/python/pynative_mode/test_staging.py | 4 +
 .../pynative_mode/test_stop_gradient.py | 80 +++++--
 .../ut/python/pynative_mode/test_training.py | 1 +
 tests/ut/python/pynative_mode/vm/test_vm.py | 2 +-
 .../train/summary/test_histogram_summary.py | 6 -
 .../train/summary/test_image_summary.py | 3 -
 tests/ut/python/train/summary/test_summary.py | 7 -
 .../test_summary_ops_params_valid_check.py | 2 -
 .../train/summary/test_tensor_summary.py | 2 -
 tests/ut/python/ut_filter.py | 6 +
 tests/ut/python/utils/test_initializer.py | 1 +
 408 files changed, 4065 insertions(+), 2357 deletions(-)

diff --git a/tests/mindspore_test_framework/apps/bert_attention_submodules.py b/tests/mindspore_test_framework/apps/bert_attention_submodules.py
index 5bd7be9c47..8a6de09d7f 100644
--- a/tests/mindspore_test_framework/apps/bert_attention_submodules.py
+++ b/tests/mindspore_test_framework/apps/bert_attention_submodules.py
@@ -166,7 +166,7 @@ class BertAttentionMask(nn.Cell):
         super(BertAttentionMask, self).__init__()

         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
         self.multiply = P.Mul()

         if self.has_attention_mask:
@@ -189,6 +189,7 @@ class BertAttentionMask(nn.Cell):

         return attention_scores

+
 class BertAttentionMaskBackward(nn.Cell):
     def __init__(self,
                  attention_mask_shape,
@@ -196,7 +197,7 @@ class BertAttentionMaskBackward(nn.Cell):
                  dtype=mstype.float32):
         super(BertAttentionMaskBackward, self).__init__()
         self.has_attention_mask = has_attention_mask
-        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
+        self.multiply_data = Tensor([-1000.0, ], dtype=dtype)
         self.multiply = P.Mul()
         self.attention_mask = Tensor(np.ones(shape=attention_mask_shape).astype(np.float32))
         if self.has_attention_mask:
@@ -218,6 +219,7 @@ class BertAttentionMaskBackward(nn.Cell):
         attention_scores = self.add(adder, attention_scores)
         return attention_scores

+
 class BertAttentionSoftmax(nn.Cell):
     def __init__(self,
                  batch_size,
diff --git a/tests/mindspore_test_framework/apps/test_bert_check_gradient.py b/tests/mindspore_test_framework/apps/test_bert_check_gradient.py
index 60f85813ef..f404f9f81d 100644
--- a/tests/mindspore_test_framework/apps/test_bert_check_gradient.py
+++ b/tests/mindspore_test_framework/apps/test_bert_check_gradient.py
@@ -20,7 +20,7 @@ import numpy as np
 from mindspore.model_zoo.Bert_NEZHA import GetNextSentenceOutput, BertNetworkWithLoss
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \
     EmbeddingLookup, EmbeddingPostprocessor, BertOutput, \
-    BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal,\
+    BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal, \
     BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel

 from mindspore import context, nn
@@ -373,9 +373,9 @@ verification_set = {
         'id': 'BertDense_CICase',
         'group': 'bert',
         'block': BertDense(
-            hidden_size=8,
-            intermediate_size=8,
-            initializer_range=0.02),
+                     hidden_size=8,
+                     intermediate_size=8,
+                     initializer_range=0.02),
         'reduce_output': False
     },
     {
diff --git a/tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py b/tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py
index 92d2e9a8d5..99cc588c86 100644
--- a/tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py
+++ b/tests/mindspore_test_framework/apps/test_bert_compare_with_npy.py
@@ -19,17 +19,18 @@ import numpy as np
 import mindspore.common.dtype as mstype
 from mindspore import context
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertAttention, SaturateCast, \
-        EmbeddingLookup, BertModel, \
-        BertConfig, EmbeddingPostprocessor, \
-        BertTransformer, BertEncoderCell, \
-        BertSelfAttention, CreateAttentionMaskFromInputMask, \
-        RelaPosMatrixGenerator, BertOutput, \
-        RelaPosEmbeddingsGenerator
+    EmbeddingLookup, BertModel, \
+    BertConfig, EmbeddingPostprocessor, \
+    BertTransformer, BertEncoderCell, \
+    BertSelfAttention, CreateAttentionMaskFromInputMask, \
+    RelaPosMatrixGenerator, BertOutput, \
+    RelaPosEmbeddingsGenerator

 from ..mindspore_test import mindspore_test
-from ..pipeline.forward.compare_forward import pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
+from ..pipeline.forward.compare_forward import \
+    pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy
 from .bert_attention_submodules import BertAttentionQueryKeyMul, BertAttentionRelativePositionKeys, BertAttentionMask, \
-        BertAttentionSoftmax, BertAttentionRelativePositionValues, BertDense
+    BertAttentionSoftmax, BertAttentionRelativePositionValues, BertDense

 verification_set = {
     'inputs': [
diff --git a/tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py b/tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py
index 2f11a580c9..58d44ba856 100644
--- a/tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py
+++ b/tests/mindspore_test_framework/apps/test_bert_ops_check_gradient.py
@@ -22,6 +22,7 @@ from ..pipeline.gradient.compare_gradient import \
     pipeline_for_compare_inputs_grad_with_numerical_diff_for_group_by_group_config, \
     pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_group_by_group_config
 from ..mindspore_test import mindspore_test
+
 # from ...vm_impl import *
diff --git a/tests/mindspore_test_framework/apps/test_check_exception.py b/tests/mindspore_test_framework/apps/test_check_exception.py
index d465ef7ac4..60cee2aea0 100644
--- a/tests/mindspore_test_framework/apps/test_check_exception.py
+++ b/tests/mindspore_test_framework/apps/test_check_exception.py
@@ -18,9 +18,11 @@
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.verify_exception import pipeline_for_verify_exception_for_case_by_case_config

+
 def func_raise_exception(x, y):
     raise ValueError()

+
 verification_set = [
     ('func_raise_exception', {
         'block': (func_raise_exception, {'exception': ValueError}),
@@ -28,6 +30,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
 def test_check_exception():
     return verification_set
diff --git a/tests/mindspore_test_framework/apps/test_lamb_check_loss.py b/tests/mindspore_test_framework/apps/test_lamb_check_loss.py
index 14461ff2d8..c73a0d7e19 100644
--- a/tests/mindspore_test_framework/apps/test_lamb_check_loss.py
+++ b/tests/mindspore_test_framework/apps/test_lamb_check_loss.py
@@ -42,6 +42,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
 def test_lamb_loss():
     context.set_context(mode=context.GRAPH_MODE)
diff --git a/tests/mindspore_test_framework/apps/test_model_loss.py b/tests/mindspore_test_framework/apps/test_model_loss.py
index dd1f2bd721..81927c9509 100644
--- a/tests/mindspore_test_framework/apps/test_model_loss.py
+++ b/tests/mindspore_test_framework/apps/test_model_loss.py
@@ -40,6 +40,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_check_model_loss_for_case_by_case_config)
 def test_model_loss():
     context.set_context(mode=context.GRAPH_MODE)
diff --git a/tests/mindspore_test_framework/apps/test_no_facade.py b/tests/mindspore_test_framework/apps/test_no_facade.py
index 0a26448de1..e50642c38b 100644
--- a/tests/mindspore_test_framework/apps/test_no_facade.py
+++ b/tests/mindspore_test_framework/apps/test_no_facade.py
@@ -21,6 +21,8 @@ import numpy as np

 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.verify_shapetype import pipeline_for_verify_shapetype_for_group_by_group_config
+
+
 # from ...vm_impl import *

 # functions could be operations or NN cell
diff --git a/tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py b/tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py
index 1a0d69f4df..9b87b77e80 100644
--- a/tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py
+++ b/tests/mindspore_test_framework/apps/test_reid_gradient_compare_with_pytorch.py
@@ -53,6 +53,7 @@ verification_set = [
     })
 ]

+
 @mindspore_test(pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config)
 def test_reid_check_gradient():
     context.set_context(mode=context.PYNATIVE_MODE)
diff --git a/tests/mindspore_test_framework/components/executor/check_exceptions.py b/tests/mindspore_test_framework/components/executor/check_exceptions.py
index 15ac5911d2..e466926ca8 100644
--- a/tests/mindspore_test_framework/components/executor/check_exceptions.py
+++ b/tests/mindspore_test_framework/components/executor/check_exceptions.py
@@ -21,6 +21,7 @@ import pytest
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class CheckExceptionsEC(IExectorComponent):
     """
     Check if the function raises the expected Exception and the error message contains specified keywords if not None.
@@ -32,6 +33,7 @@ class CheckExceptionsEC(IExectorComponent):
         'error_keywords': ['TensorAdd', 'shape']
     }
     """
+
     def __call__(self):
         f = self.function[keyword.block]
         args = self.inputs[keyword.desc_inputs]
diff --git a/tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py b/tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py
index fd18aef3ef..10d21a4149 100644
--- a/tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py
+++ b/tests/mindspore_test_framework/components/executor/check_gradient_for_scalar_func.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, ScalarGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientForScalarFunctionEC(IExectorComponent):
     """
     Check gradient against numeric with respect to inputs for scalar function, execute and verify.
@@ -26,6 +27,7 @@ class CheckGradientForScalarFunctionEC(IExectorComponent):
     Examples:
         'block': scalar_function
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, sampling_times, _ = \
             get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py
index 05839d0595..9d07e54221 100644
--- a/tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py
+++ b/tests/mindspore_test_framework/components/executor/check_gradient_wrt_inputs.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, OperationGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientWrtInputsEC(IExectorComponent):
     """
     Check gradient against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtInputsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, \
             sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py b/tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py
index 509b73d3e1..d364f6d903 100644
--- a/tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py
+++ b/tests/mindspore_test_framework/components/executor/check_gradient_wrt_params.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_gradient, NNGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckGradientWrtParamsEC(IExectorComponent):
     """
     Check gradient against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckGradientWrtParamsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, \
             sampling_times, reduce_output = get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py b/tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py
index 8239eaf297..1bb11cf62d 100644
--- a/tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py
+++ b/tests/mindspore_test_framework/components/executor/check_jacobian_for_scalar_func.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, ScalarGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianForScalarFunctionEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to inputs for scalar_func, execute and verify.
@@ -26,6 +27,7 @@ class CheckJacobianForScalarFunctionEC(IExectorComponent):
     Examples:
         'block': scalar_function
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py b/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py
index fa0d1b4ce1..03a5529881 100644
--- a/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py
+++ b/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_inputs.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, OperationGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianWrtInputsEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to inputs, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtInputsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py b/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py
index c3a01c886d..83deb3ee6b 100644
--- a/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py
+++ b/tests/mindspore_test_framework/components/executor/check_jacobian_wrt_params.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.check_gradient import check_jacobian, NNGradChecker
 from ...utils.config_util import get_grad_checking_options

+
 class CheckJacobianWrtParamsEC(IExectorComponent):
     """
     Check jacobian against numeric with respect to params, execute and verify.
@@ -35,6 +36,7 @@ class CheckJacobianWrtParamsEC(IExectorComponent):
                      key_act=None,
                      initializer_range=0.02)
     """
+
     def __call__(self):
         f, args, delta, max_error, input_selector, output_selector, _, _ = \
             get_grad_checking_options(self.function, self.inputs)
diff --git a/tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py b/tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py
index 10ae33e17a..1f5ae7dd58 100644
--- a/tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py
+++ b/tests/mindspore_test_framework/components/executor/exec_and_verify_model_loss.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IExectorComponent
 from ...utils.model_util import Model
 from ...utils import keyword

+
 class LossVerifierEC(IExectorComponent):
     """
     Verify if the model can converge to expected loss.
@@ -32,6 +33,7 @@ class LossVerifierEC(IExectorComponent):
         'loss_upper_bound': 0.03,
     }
     """
+
     def __call__(self):
         model = self.function[keyword.block][keyword.model]
         loss = self.function[keyword.block][keyword.loss]
diff --git a/tests/mindspore_test_framework/components/executor/exec_forward.py b/tests/mindspore_test_framework/components/executor/exec_forward.py
index 45c8f69755..c4ea4626c9 100644
--- a/tests/mindspore_test_framework/components/executor/exec_forward.py
+++ b/tests/mindspore_test_framework/components/executor/exec_forward.py
@@ -18,10 +18,12 @@
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class IdentityEC(IExectorComponent):
     """
     Execute function/inputs.
     """
+
     def __call__(self):
         result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
         group = self.function[keyword.group] + '-' + self.inputs[keyword.group]
diff --git a/tests/mindspore_test_framework/components/executor/exec_gradient.py b/tests/mindspore_test_framework/components/executor/exec_gradient.py
index 27061aec25..3609316ec1 100644
--- a/tests/mindspore_test_framework/components/executor/exec_gradient.py
+++ b/tests/mindspore_test_framework/components/executor/exec_gradient.py
@@ -18,10 +18,12 @@
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

+
 class IdentityBackwardEC(IExectorComponent):
     """
     Execute function/inputs, with all bprops attached, the bprop function created by BC should handle these bprops.
     """
+
     def __call__(self):
         result_id = self.function[keyword.id] + '-' + self.inputs[keyword.id]
         group = self.function[keyword.group] + '-' + self.inputs[keyword.group]
diff --git a/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py b/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py
index c5535370c1..8ae177bfcf 100644
--- a/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py
+++ b/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_group_for_expect_result.py
@@ -18,10 +18,12 @@
 from ...components.icomponent import IERPolicyComponent
 from ...utils import keyword

+
 class GroupCartesianProductERPC(IERPolicyComponent):
     """
     Combine expect/result by do cartesian product on group.
""" + def __call__(self): ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.group] == s2[keyword.group]] return ret diff --git a/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py b/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py index 1f44b7015e..2815785af6 100644 --- a/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py +++ b/tests/mindspore_test_framework/components/expect_result_policy/cartesian_product_on_id_for_expect_result.py @@ -18,10 +18,12 @@ from ...components.icomponent import IERPolicyComponent from ...utils import keyword + class IdCartesianProductERPC(IERPolicyComponent): """ Combine expect/result by do cartesian product on id. """ + def __call__(self): ret = [(s1, s2) for s1 in self.expect for s2 in self.result if s1[keyword.id] == s2[keyword.id]] return ret diff --git a/tests/mindspore_test_framework/components/facade/me_facade.py b/tests/mindspore_test_framework/components/facade/me_facade.py index 3abde60c6b..457037509e 100644 --- a/tests/mindspore_test_framework/components/facade/me_facade.py +++ b/tests/mindspore_test_framework/components/facade/me_facade.py @@ -21,6 +21,7 @@ from ...components.icomponent import IFacadeComponent from ...utils.facade_util import get_block_config, fill_block_config from ...utils import keyword + class MeFacadeFC(IFacadeComponent): """ Transform ME style config to mindspore_test_framework style. @@ -47,6 +48,7 @@ class MeFacadeFC(IFacadeComponent): } }) """ + def __call__(self): ret = get_block_config() for config in self.verification_set: diff --git a/tests/mindspore_test_framework/components/function/compile_block.py b/tests/mindspore_test_framework/components/function/compile_block.py index 8c226c6198..57f9a1768f 100644 --- a/tests/mindspore_test_framework/components/function/compile_block.py +++ b/tests/mindspore_test_framework/components/function/compile_block.py @@ -18,6 +18,7 @@ from ...components.icomponent import IBuilderComponent from ...utils.block_util import compile_block, gen_net, create_funcs + class CompileBlockBC(IBuilderComponent): """ Build a function that do mindspore compile. @@ -42,5 +43,6 @@ class CompileBlockBC(IBuilderComponent): dtype=mstype.float32, compute_type=mstype.float32) """ + def __call__(self): return create_funcs(self.verification_set, gen_net, compile_block) diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py index 829141c2f1..83c76c4cdb 100644 --- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py @@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation from ...components.icomponent import IBuilderComponent from ...utils.block_util import compile_block, gen_grad_net, create_funcs + class CompileBackwardBlockWrtInputsBC(IBuilderComponent): """ Build a function that do mindspore gradient compile with respect to inputs. 
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtInputsBC(IBuilderComponent):
                 dtype=mstype.float32,
                 compute_type=mstype.float32)
     """
+
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)
         return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)
diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py
index 7f9ac3e05e..15cc02b3f4 100644
--- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py
+++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py
@@ -19,6 +19,7 @@
 from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import compile_block, gen_grad_net, create_funcs

+
 class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
     """
     Build a function that do mindspore gradient compile with respect to params.
@@ -43,6 +44,7 @@ class CompileBackwardBlockWrtParamsBC(IBuilderComponent):
                 dtype=mstype.float32,
                 compute_type=mstype.float32)
     """
+
     def __call__(self, verification_set):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
         return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op)
diff --git a/tests/mindspore_test_framework/components/function/get_function_from_config.py b/tests/mindspore_test_framework/components/function/get_function_from_config.py
index 0960e45302..6b67c6dfb9 100644
--- a/tests/mindspore_test_framework/components/function/get_function_from_config.py
+++ b/tests/mindspore_test_framework/components/function/get_function_from_config.py
@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils import keyword

+
 class IdentityBC(IBuilderComponent):
     """
     Return function.
@@ -25,5 +26,6 @@ class IdentityBC(IBuilderComponent):
     Examples:
         'function': Add
     """
+
     def __call__(self):
         return self.verification_set[keyword.function]
diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py
index 5789e39eb4..ecead19118 100644
--- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py
+++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_block.py
@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, get_uniform_with_shape, gen_net, create_funcs

+
 class RunBlockWithRandParamBC(IBuilderComponent):
     """
     Build a function with uniformed params that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockWithRandParamBC(IBuilderComponent):
                 dtype=mstype.float32,
                 compute_type=mstype.float32)
     """
+
     def __call__(self):
         return create_funcs(self.verification_set, gen_net, run_block, default_rand_func=get_uniform_with_shape)
diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py
index 69fcfdc316..2dcf807328 100644
--- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py
+++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py
@@ -19,6 +19,7 @@
 from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape

+
 class RunBackwardBlockWrtInputsWithRandParamBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)
diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py
index 447531ffaa..22f03194c4 100644
--- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py
+++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py
@@ -19,6 +19,7 @@
 from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_uniform_with_shape

+
 class RunBackwardBlockWrtParamsWithRandParamBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
diff --git a/tests/mindspore_test_framework/components/function/run_block.py b/tests/mindspore_test_framework/components/function/run_block.py
index 8340bb7b23..5c3dc27d5e 100644
--- a/tests/mindspore_test_framework/components/function/run_block.py
+++ b/tests/mindspore_test_framework/components/function/run_block.py
@@ -18,6 +18,7 @@
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_net, create_funcs

+
 class RunBlockBC(IBuilderComponent):
     """
     Build a function that run mindspore pipeline.
@@ -42,5 +43,6 @@ class RunBlockBC(IBuilderComponent):
                 dtype=mstype.float32,
                 compute_type=mstype.float32)
     """
+
     def __call__(self):
         return create_funcs(self.verification_set, gen_net, run_block)
diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py
index 699fc74322..aa7ffad79c 100644
--- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py
+++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py
@@ -19,6 +19,7 @@
 from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs

+
 class RunBackwardBlockWrtInputsBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_all=True, sens_param=True)
diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py
index 90e58ac763..d365a00230 100644
--- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py
+++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py
@@ -19,6 +19,7 @@
 from mindspore.ops.composite import GradOperation
 from ...components.icomponent import IBuilderComponent
 from ...utils.block_util import run_block, gen_grad_net, create_funcs

+
 class RunBackwardBlockWrtParamsBC(IBuilderComponent):
     def __call__(self):
         grad_op = GradOperation('grad', get_by_list=True, sens_param=True)
diff --git a/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py b/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py
index 13925ee73b..4556b76723 100644
--- a/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py
+++ b/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_group_for_function_inputs.py
@@ -23,6 +23,7 @@ class GroupCartesianProductFIPC(IFIPolicyComponent):
     """
     Combine function/inputs by do cartesian product on group.
     """
+
     def __call__(self):
         ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.group] == s2[keyword.group]]
         return ret
diff --git a/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py b/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py
index 2ebae6072e..1247738123 100644
--- a/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py
+++ b/tests/mindspore_test_framework/components/function_inputs_policy/cartesian_product_on_id_for_function_inputs.py
@@ -18,10 +18,12 @@
 from ...components.icomponent import IFIPolicyComponent
 from ...utils import keyword

+
 class IdCartesianProductFIPC(IFIPolicyComponent):
     """
     Combine function/inputs by do cartesian product on id.
""" + def __call__(self): ret = [(s1, s2) for s1 in self.function for s2 in self.inputs if s1[keyword.id] == s2[keyword.id]] return ret diff --git a/tests/mindspore_test_framework/components/icomponent.py b/tests/mindspore_test_framework/components/icomponent.py index 8b535ba39a..4ee8d2e743 100644 --- a/tests/mindspore_test_framework/components/icomponent.py +++ b/tests/mindspore_test_framework/components/icomponent.py @@ -15,8 +15,10 @@ """Component interfaces.""" + class IComponent: """Component interfaces.""" + def __init__(self, verification_set): self.verification_set = verification_set @@ -26,18 +28,21 @@ class IComponent: class IDataComponent(IComponent): """Create inputs for verification_set.""" + def __call__(self): raise NotImplementedError class IBuilderComponent(IComponent): """Build system under test.""" + def __call__(self): raise NotImplementedError class IExectorComponent(IComponent): """Execute sut, take (function, input) pairs as input.""" + def __init__(self, verification_set, function, inputs): super(IExectorComponent, self).__init__(verification_set) self.function = function @@ -49,6 +54,7 @@ class IExectorComponent(IComponent): class IVerifierComponent(IComponent): """Verify sut result, take (expect, result) pairs as input.""" + def __init__(self, verification_set, expect, result): super(IVerifierComponent, self).__init__(verification_set) self.expect = expect @@ -60,6 +66,7 @@ class IVerifierComponent(IComponent): class IFIPolicyComponent(IComponent): """Combine functions/inputs.""" + def __init__(self, verification_set, function, inputs): super(IFIPolicyComponent, self).__init__(verification_set) self.function = function @@ -71,6 +78,7 @@ class IFIPolicyComponent(IComponent): class IERPolicyComponent(IComponent): """Combine expects and results.""" + def __init__(self, verification_set, expect, result): super(IERPolicyComponent, self).__init__(verification_set) self.expect = expect @@ -82,5 +90,6 @@ class IERPolicyComponent(IComponent): class IFacadeComponent(IComponent): """Adapt verification_set.""" + def __call__(self): raise NotImplementedError diff --git a/tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py b/tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py index babda92310..0d7de396a8 100644 --- a/tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py +++ b/tests/mindspore_test_framework/components/inputs/generate_dataset_for_linear_regression.py @@ -19,6 +19,7 @@ from ...components.icomponent import IDataComponent from ...utils.dataset_util import generate_dataset_for_linear_regression from ...utils import keyword + class GenerateDataSetForLRDC(IDataComponent): """ Create dataset for linear regression, with salt from normal distribution. 
@@ -30,6 +31,7 @@ class GenerateDataSetForLRDC(IDataComponent):
         'batch_size': 20,
     }
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:
diff --git a/tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py b/tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py
index dd17804c4e..cb725ca648 100644
--- a/tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py
+++ b/tests/mindspore_test_framework/components/inputs/generate_inputs_from_shape.py
@@ -23,6 +23,7 @@
 from ...utils.other_util import shape2tensor
 from ...utils.config_util import get_input_config
 from ...utils import keyword

+
 class GenerateFromShapeDC(IDataComponent):
     """
     Generate inputs from shape, desc_inputs must be configured, desc_bprop is optional.
@@ -41,6 +42,7 @@ class GenerateFromShapeDC(IDataComponent):
         ([1, 16, 128, 64], np.float32, 6),  # (inputs, dtype, scale)
     ]
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:
diff --git a/tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py b/tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py
index c158cb114a..aa6516ef02 100644
--- a/tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py
+++ b/tests/mindspore_test_framework/components/inputs/get_inputs_from_config.py
@@ -17,6 +17,7 @@

 from ...components.icomponent import IDataComponent

+
 class IdentityDC(IDataComponent):
     """
     Return inputs.
@@ -26,5 +27,6 @@ class IdentityDC(IDataComponent):
         np.array([[2, 2], [2, 2]]).astype(np.float32)
     ]
     """
+
     def __call__(self):
         return self.verification_set['inputs']
diff --git a/tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py b/tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py
index 21e5e4616d..4e3b6b0ec6 100644
--- a/tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py
+++ b/tests/mindspore_test_framework/components/inputs/load_inputs_from_npy.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IDataComponent
 from ...utils.npy_util import load_data_from_npy_or_shape
 from ...utils import keyword

+
 class LoadFromNpyDC(IDataComponent):
     """
     Load inputs from npy data, inputs could be shape/tensor/np.ndarray/file path.
@@ -43,6 +44,7 @@ class LoadFromNpyDC(IDataComponent):
         ([2, 2], np.float32, 6)
     ]
     """
+
     def __call__(self):
         result = []
         for config in self.verification_set[keyword.inputs]:
diff --git a/tests/mindspore_test_framework/components/verifier/compare_forward.py b/tests/mindspore_test_framework/components/verifier/compare_forward.py
index bcef20348d..196fc263f8 100644
--- a/tests/mindspore_test_framework/components/verifier/compare_forward.py
+++ b/tests/mindspore_test_framework/components/verifier/compare_forward.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IVerifierComponent
 from ...utils.compare_util import compare
 from ...utils import keyword

+
 class CompareWithVC(IVerifierComponent):
     """
     Compare the result with baseline functions configured in 'compare' config item.
@@ -41,5 +42,6 @@ class CompareWithVC(IVerifierComponent):
         'max_error': 1e-3
     }
     """
+
     def __call__(self):
         compare(self.expect, self.func_result, baseline=keyword.compare_with)
diff --git a/tests/mindspore_test_framework/components/verifier/compare_gradient.py b/tests/mindspore_test_framework/components/verifier/compare_gradient.py
index 09919ed219..45f39d3d42 100644
--- a/tests/mindspore_test_framework/components/verifier/compare_gradient.py
+++ b/tests/mindspore_test_framework/components/verifier/compare_gradient.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IVerifierComponent
 from ...utils.compare_util import compare
 from ...utils import keyword

+
 class CompareGradientWithVC(IVerifierComponent):
     """
     Compare the result with baseline functions configured in 'compare_gradient_with' config item.
@@ -35,5 +36,6 @@ class CompareGradientWithVC(IVerifierComponent):
         'max_error': 1e-3
     }
     """
+
     def __call__(self):
         compare(self.expect, self.func_result, baseline=keyword.compare_gradient_with)
diff --git a/tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py b/tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py
index 19e9bbffaa..e4523fdaf9 100644
--- a/tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py
+++ b/tests/mindspore_test_framework/components/verifier/verify_expect_from_npy.py
@@ -22,6 +22,7 @@
 from ...utils.npy_util import load_data_from_npy_or_shape
 from ...utils.verifier_util import tolerance_assert
 from ...utils import keyword

+
 class LoadFromNpyVC(IVerifierComponent):
     """
     Verify if the results are like expects from npy data, expects could be shape/tensor/np.ndarray/file path.
@@ -37,6 +38,7 @@ class LoadFromNpyVC(IVerifierComponent):
         ([2, 2], np.float32, 6, 1e-3)  # (shape, dtype, scale, max_error)
     ]
     """
+
     def __call__(self):
         dpaths = self.expect.get(keyword.desc_expect)
         expects = load_data_from_npy_or_shape(dpaths, False)
diff --git a/tests/mindspore_test_framework/components/verifier/verify_shapetype.py b/tests/mindspore_test_framework/components/verifier/verify_shapetype.py
index 9ba3371e45..bf6548197b 100644
--- a/tests/mindspore_test_framework/components/verifier/verify_shapetype.py
+++ b/tests/mindspore_test_framework/components/verifier/verify_shapetype.py
@@ -19,6 +19,7 @@
 from ...components.icomponent import IVerifierComponent
 from ...utils.other_util import to_numpy_list
 from ...utils import keyword

+
 class ShapeTypeVC(IVerifierComponent):
     """
     Verify if the result's shape and type are correct.
@@ -33,6 +34,7 @@ class ShapeTypeVC(IVerifierComponent):
         ]
     }
     """
+
     def __call__(self):
         results = to_numpy_list(self.func_result[keyword.result])
         expects = self.expect[keyword.desc_expect][keyword.shape_type]
diff --git a/tests/mindspore_test_framework/mindspore_test.py b/tests/mindspore_test_framework/mindspore_test.py
index fde26c94f1..0413d20c96 100644
--- a/tests/mindspore_test_framework/mindspore_test.py
+++ b/tests/mindspore_test_framework/mindspore_test.py
@@ -18,10 +18,11 @@ import logging
 import pytest

 from .components.icomponent import IDataComponent, IBuilderComponent, IExectorComponent, \
-        IVerifierComponent, IFIPolicyComponent, IERPolicyComponent, IComponent, \
-        IFacadeComponent
+    IVerifierComponent, IFIPolicyComponent, IERPolicyComponent, IComponent, \
+    IFacadeComponent
 from .utils import keyword

+
 def mindspore_test(verification_pipeline):
     """
     Run verification pipeline.
@@ -31,6 +32,7 @@ def mindspore_test(verification_pipeline): Returns: """ + def decorate(get_verification_set): verification_set = get_verification_set() diff --git a/tests/mindspore_test_framework/pipeline/forward/compare_forward.py b/tests/mindspore_test_framework/pipeline/forward/compare_forward.py index 77b62c0ac6..184e99dccd 100644 --- a/tests/mindspore_test_framework/pipeline/forward/compare_forward.py +++ b/tests/mindspore_test_framework/pipeline/forward/compare_forward.py @@ -107,7 +107,7 @@ Example: ] } """ -pipeline_for_compare_forward_with_npy_for_group_by_group_config =\ +pipeline_for_compare_forward_with_npy_for_group_by_group_config = \ [LoadFromNpyDC, RunBlockWithRandParamBC, IdCartesianProductFIPC, IdentityEC, IdCartesianProductERPC, LoadFromNpyVC] @@ -161,7 +161,7 @@ Example: ] } """ -pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy =\ +pipeline_for_compare_forward_with_npy_for_group_by_group_config_using_group_policy = \ [LoadFromNpyDC, RunBlockWithRandParamBC, GroupCartesianProductFIPC, IdentityEC, IdCartesianProductERPC, LoadFromNpyVC] diff --git a/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py b/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py index 205f49d894..f6ff132294 100644 --- a/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py +++ b/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py @@ -49,7 +49,7 @@ Example: }) ] """ -pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config =\ +pipeline_for_compare_inputs_grad_with_user_defined_for_case_by_case_config = \ [MeFacadeFC, GenerateFromShapeDC, RunBackwardBlockWrtInputsBC, IdCartesianProductFIPC, IdentityBackwardEC, IdCartesianProductERPC, @@ -89,7 +89,7 @@ Example: }) ] """ -pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config =\ +pipeline_for_compare_inputs_grad_with_npy_for_case_by_case_config = \ [MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtInputsBC, IdCartesianProductFIPC, IdentityBackwardEC, IdCartesianProductERPC, LoadFromNpyVC] @@ -128,7 +128,7 @@ Example: }) ] """ -pipeline_for_compare_params_grad_with_npy_for_case_by_case_config =\ +pipeline_for_compare_params_grad_with_npy_for_case_by_case_config = \ [MeFacadeFC, LoadFromNpyDC, RunBackwardBlockWrtParamsBC, IdCartesianProductFIPC, IdentityBackwardEC, IdCartesianProductERPC, LoadFromNpyVC] @@ -146,7 +146,7 @@ Example: }) ] """ -pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config =\ +pipeline_for_compare_inputs_grad_with_numerical_diff_for_case_by_case_config = \ [MeFacadeFC, GenerateFromShapeDC, IdentityBC, IdCartesianProductFIPC, CheckGradientWrtInputsEC] @@ -244,7 +244,7 @@ Example: }), ] """ -pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config =\ +pipeline_for_compare_inputs_jacobian_with_numerical_diff_for_case_by_case_config = \ [MeFacadeFC, GenerateFromShapeDC, IdentityBC, IdCartesianProductFIPC, CheckJacobianWrtInputsEC] diff --git a/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py b/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py index b0dcae0d98..95219a71fd 100644 --- a/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py +++ b/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py @@ -35,7 +35,7 @@ Example: }) ] """ -pipeline_for_compile_grad_anf_graph_for_case_by_case_config =\ +pipeline_for_compile_grad_anf_graph_for_case_by_case_config = \ [MeFacadeFC, GenerateFromShapeDC, 
CompileBackwardBlockWrtInputsBC, IdCartesianProductFIPC, IdentityBackwardEC] @@ -51,6 +51,6 @@ Example: }) ] """ -pipeline_for_compile_grad_ge_graph_for_case_by_case_config =\ +pipeline_for_compile_grad_ge_graph_for_case_by_case_config = \ [MeFacadeFC, GenerateFromShapeDC, RunBackwardBlockWrtInputsBC, IdCartesianProductFIPC, IdentityBackwardEC] diff --git a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index 28a3c62b31..3ff6d27c14 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -28,10 +28,12 @@ from mindspore.ops import operations as P from mindspore import ParameterTuple from . import keyword + def get_uniform_with_shape(shape): np.random.seed(1) return np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32) + def set_block_param_with_rand(net, rand_func=None): if not isinstance(net, nn.Cell) or rand_func is None: return @@ -39,11 +41,13 @@ def set_block_param_with_rand(net, rand_func=None): for param in net.trainable_params(): param.default_input = Tensor(rand_func(param.default_input.asnumpy().shape)) + def compile_block(net, *inputs, rand_func=None, training=True): set_block_training(net, training) set_block_param_with_rand(net, rand_func) return _executor.compile(net, *inputs) + def run_block(net, *inputs, rand_func=None, training=True): set_block_training(net, training) set_block_param_with_rand(net, rand_func) @@ -52,10 +56,13 @@ def run_block(net, *inputs, rand_func=None, training=True): @ms_function def _func_pynative(*inputs): return net(*inputs) + return _func_pynative(*inputs) + return func_pynative(*inputs) return net(*inputs) + class IthOutputCell(nn.Cell): def __init__(self, network, output_index): if isinstance(network, nn.Cell): @@ -69,12 +76,14 @@ class IthOutputCell(nn.Cell): predict = self.network(*inputs)[self.output_index] return predict + def get_output_cell(network, num_input, output_index, training=True): _ = num_input net = IthOutputCell(network, output_index) set_block_training(net, training) return net + class OutputReduceSumCell(nn.Cell): def __init__(self, network, output_num): super(OutputReduceSumCell, self).__init__() @@ -92,11 +101,13 @@ class OutputReduceSumCell(nn.Cell): ret = ret + F.make_tuple(predict_reduce) return ret + def get_output_reduce_cell(network, output_num, training=True): net = OutputReduceSumCell(network, output_num) set_block_training(net, training) return net + class InputOpNet(nn.Cell): def __init__(self, op, c1=None, c2=None, c3=None, c4=None): super(InputOpNet, self).__init__() @@ -112,6 +123,7 @@ class InputOpNet(nn.Cell): def construct0_c0_fake(self, data): x = self.op() + data return x + def construct0_c1_fake(self, data): x = self.op(self.c1) + data return x @@ -212,6 +224,7 @@ class InputOpNet(nn.Cell): x = self.op(x1, x2, x3, x4, x5, self.c1, self.c2, self.c3, self.c4) return x + def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False): if isinstance(op, nn.Cell): return op @@ -227,6 +240,7 @@ def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_ set_block_training(net, training) return net + class OperationBackward(nn.Cell): def __init__(self, network, grad_op, sens): if isinstance(network, nn.Cell): @@ -240,6 +254,7 @@ class OperationBackward(nn.Cell): def construct(self, *inputs): return self.grad(self.network)(*inputs, self.sens) + class OperationBackwardWithNoSens(nn.Cell): def __init__(self, network, grad_op): if 
isinstance(network, nn.Cell): @@ -252,6 +267,7 @@ class OperationBackwardWithNoSens(nn.Cell): def construct(self, *inputs): return self.grad(self.network)(*inputs) + class NNBackward(nn.Cell): def __init__(self, network, grad_op, sens): if isinstance(network, nn.Cell): @@ -266,6 +282,7 @@ class NNBackward(nn.Cell): def construct(self, *inputs): return self.grad(self.network, self.params)(*inputs, self.sens) + class NNBackwardWithNoSens(nn.Cell): def __init__(self, network, grad_op): if isinstance(network, nn.Cell): @@ -279,6 +296,7 @@ class NNBackwardWithNoSens(nn.Cell): def construct(self, *inputs): return self.grad(self.network, self.params)(*inputs) + def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(), const_first=False, add_fake_input=False): if not isinstance(net, nn.Cell): @@ -296,14 +314,17 @@ def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=( set_block_training(net, training) return net + def set_block_training(net, training=True): if isinstance(net, nn.Cell): net.set_train(training) + def set_block_phase(net, phase='train'): if isinstance(net, nn.Cell): net.phase = phase + def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None): def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs): def function(*inputs): @@ -347,6 +368,7 @@ def create_funcs(verification_set, block_generator, block_runner, grad_op=None, b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first, add_fake_input=add_fake_input) return block_runner(b, *inputs, rand_func=rand_func) + return function bc_configs = verification_set[keyword.function] diff --git a/tests/mindspore_test_framework/utils/bprop_util.py b/tests/mindspore_test_framework/utils/bprop_util.py index 35d4f39335..a883103190 100644 --- a/tests/mindspore_test_framework/utils/bprop_util.py +++ b/tests/mindspore_test_framework/utils/bprop_util.py @@ -21,10 +21,12 @@ from mindspore.nn import Cell from mindspore.common import ParameterTuple from mindspore.common.api import ms_function + class Bprop(Cell): """ The gradient wraper. """ + def __init__(self, func, wrt_params, params, grad_op, sens): super(Bprop, self).__init__(auto_prefix=False) self.func = func @@ -50,6 +52,7 @@ class Bprop(Cell): else: return self.grad(self.func)(*inputs) + def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list = None): """ Compute gradients of function. 
@@ -90,6 +93,8 @@ def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list @ms_function def _func_pynative(*inputs): return grad(*inputs) + return _func_pynative(*inputs) + return func_pynative(*inputs) return grad(*inputs) diff --git a/tests/mindspore_test_framework/utils/check_gradient.py b/tests/mindspore_test_framework/utils/check_gradient.py index fea7e22e55..921c973c65 100644 --- a/tests/mindspore_test_framework/utils/check_gradient.py +++ b/tests/mindspore_test_framework/utils/check_gradient.py @@ -27,6 +27,7 @@ import mindspore._c_expression as _c_expression from .block_util import get_output_cell, gen_net, gen_grad_net, \ get_uniform_with_shape, set_block_phase, get_output_reduce_cell, set_block_param_with_rand + class _GradChecker: """ Check the theoretical Jacobian against numeric @@ -130,6 +131,7 @@ class _GradChecker: @ms_function def _func_pynative(*inputs): return net(*inputs) + return _func_pynative(*inputs) return func_forward_pynative @@ -277,7 +279,7 @@ class _GradChecker: print('GradChecker.compute_theoretical.args', args) gout = self.wrap(self.gfns[out_index](*args)) gout = [self.to_numpy_and_scale(g) if isinstance(g, _c_expression.Tensor) \ - else self.to_numpy_and_scale(np.array(g)) for g in gout] + else self.to_numpy_and_scale(np.array(g)) for g in gout] print('GradChecker.compute_theoretical.gout', gout) dy_mask.ravel().view()[jacobian_col] = 0.0 @@ -433,6 +435,7 @@ def check_gradient(fn, *args, delta=1e-3, max_error=1e-3, reduce_output=reduce_output) grad_checker.assert_match() + def check_jacobian(fn, *args, delta=1e-3, max_error=1e-3, grad_checker_class=OperationGradChecker, input_selector=None, diff --git a/tests/mindspore_test_framework/utils/compare_util.py b/tests/mindspore_test_framework/utils/compare_util.py index 178d4dab07..2c13301a79 100644 --- a/tests/mindspore_test_framework/utils/compare_util.py +++ b/tests/mindspore_test_framework/utils/compare_util.py @@ -19,6 +19,7 @@ import numpy as np from .other_util import to_numpy_list from . import keyword + def compare(expect, func_result, baseline): """ Compare results of function with baseline functions. diff --git a/tests/mindspore_test_framework/utils/config_util.py b/tests/mindspore_test_framework/utils/config_util.py index b7be15ac23..ec816e9a41 100644 --- a/tests/mindspore_test_framework/utils/config_util.py +++ b/tests/mindspore_test_framework/utils/config_util.py @@ -20,6 +20,7 @@ import numpy as np from . import keyword from .other_util import select_from_config_tuple + def get_input_config(d): """ Get input config. @@ -38,6 +39,7 @@ def get_input_config(d): scale = ext_config.get(keyword.scale, 1) return s, dtype, scale + def get_expect_config(d): """ Get input config. @@ -66,6 +68,7 @@ def get_expect_config(d): absolute_tolerance = ext_config.get(keyword.absolute_tolerance, 0.0) return s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance + def get_function_config(function): """ Get input config. @@ -91,6 +94,7 @@ def get_function_config(function): return delta, max_error, input_selector, output_selector, sampling_times, \ reduce_output, init_param_with, split_outputs, exception, error_keywords + def get_grad_checking_options(function, inputs): """ Get input config. 
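Note: the desc_inputs/desc_expect entries that get_input_config and get_expect_config unpack above follow a (shape, dtype, scale) tuple convention, with an optional trailing dict of overrides. A minimal standalone sketch of that convention, assuming the defaults implied by the docstrings (dtype np.float32, scale 1); the helper name and parsing logic here are illustrative, not the framework's exact code:

    import numpy as np

    def parse_input_desc(desc):
        # Illustrative parser for a (shape, dtype, scale) descriptor such as
        # ([2, 2], np.float32, 6); only the shape is mandatory (assumption).
        if not isinstance(desc, tuple):
            return np.random.rand(*desc).astype(np.float32)
        shape = desc[0]
        dtype = desc[1] if len(desc) > 1 else np.float32
        scale = desc[2] if len(desc) > 2 else 1
        return (np.random.rand(*shape) * scale).astype(dtype)

    sample = parse_input_desc(([2, 2], np.float32, 6))  # scaled random 2x2 array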
diff --git a/tests/mindspore_test_framework/utils/dataset_util.py b/tests/mindspore_test_framework/utils/dataset_util.py index bc3a2956f7..8953b5d81a 100644 --- a/tests/mindspore_test_framework/utils/dataset_util.py +++ b/tests/mindspore_test_framework/utils/dataset_util.py @@ -19,6 +19,7 @@ import random import numpy as np from mindspore import Tensor + def generate_dataset_for_linear_regression(true_w, true_b, num_samples, batch_size): features = np.random.normal(scale=1, size=(num_samples, len(true_w))) labels = np.matmul(features, np.reshape(np.array(true_w), (-1, 1))) + true_b diff --git a/tests/mindspore_test_framework/utils/debug_util.py b/tests/mindspore_test_framework/utils/debug_util.py index 7b2978a6da..2aadb28cee 100644 --- a/tests/mindspore_test_framework/utils/debug_util.py +++ b/tests/mindspore_test_framework/utils/debug_util.py @@ -24,9 +24,10 @@ from mindspore.ops._grad.grad_base import bprop_getters from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer logging.basicConfig(level=logging.DEBUG, format= - '[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s') +'[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s') logger = logging.getLogger(__name__) + class PrintShapeType(PrimitiveWithInfer): """ PrintShapeType input's shape and type. @@ -78,14 +79,17 @@ class PrintShapeType(PrimitiveWithInfer): @bprop_getters.register(PrintShapeType) def get_bprop_print_shape_type(self): """Generate bprop for PrintShapeType""" + def bprop(x, out, dout): return (dout,) + return bprop class PrintShapeTypeCell(nn.Cell): def __init__(self): super(PrintShapeTypeCell, self).__init__() + def construct(self, msg, x): PrintShapeType(msg)(x) return x diff --git a/tests/mindspore_test_framework/utils/facade_util.py b/tests/mindspore_test_framework/utils/facade_util.py index 4c5896b3df..c039fa4e5e 100644 --- a/tests/mindspore_test_framework/utils/facade_util.py +++ b/tests/mindspore_test_framework/utils/facade_util.py @@ -18,6 +18,7 @@ from . import keyword from .config_util import get_function_config + def get_block_config(): """ Get Empty function config. 
@@ -28,6 +29,7 @@ def get_block_config(): ret[keyword.expect] = [] return ret + def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, expect, desc_const, const_first, add_fake_input, fake_input_type): """ @@ -95,7 +97,7 @@ def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, ex if expect: expect_list.append({ - keyword.id: tid+'-'+tid, - keyword.group: group+'-'+group, + keyword.id: tid + '-' + tid, + keyword.group: group + '-' + group, keyword.desc_expect: expect }) diff --git a/tests/mindspore_test_framework/utils/keyword.py b/tests/mindspore_test_framework/utils/keyword.py index 79f1f91d8f..56c27b0d04 100644 --- a/tests/mindspore_test_framework/utils/keyword.py +++ b/tests/mindspore_test_framework/utils/keyword.py @@ -17,11 +17,14 @@ import sys + class _MindsporeTestFrameworkkeyword: def __setattr__(self, name, value): if name in self.__dict__: raise TypeError("can not rebind keyword (%s)" % name) self.__dict__[name] = value + + keyword = _MindsporeTestFrameworkkeyword() keyword.function = "function" diff --git a/tests/mindspore_test_framework/utils/model_util.py b/tests/mindspore_test_framework/utils/model_util.py index 61a22f27cb..ca35fdb680 100644 --- a/tests/mindspore_test_framework/utils/model_util.py +++ b/tests/mindspore_test_framework/utils/model_util.py @@ -24,8 +24,10 @@ from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.ops import composite as C + class SquaredLoss(nn.Cell): """Squared loss function.""" + def __init__(self): super(SquaredLoss, self).__init__() self.reshape = P.Reshape() @@ -37,7 +39,10 @@ class SquaredLoss(nn.Cell): ret = y_hat - self.reshape(y, self.shape(y_hat)) return self.reduce_sum((ret * ret) / self.two, (0,)) + opt_step = C.MultitypeFuncGraph("opt_step") + + @opt_step.register("Tensor", "Tensor", "Tensor", "Tensor") def update_opt_step(learning_rate, batch_size, parameter, gradient): @@ -56,8 +61,10 @@ def update_opt_step(learning_rate, batch_size, parameter, gradient): F.assign(parameter, next_param) return next_param + class SGD(nn.Cell): """SGD optimizer.""" + def __init__(self, parameters, learning_rate=0.001, batch_size=1): super(SGD, self).__init__() self.parameters = ParameterTuple(parameters) @@ -73,8 +80,10 @@ class SGD(nn.Cell): self.parameters, gradients) return success + class Linreg(nn.Cell): """Linear regression model.""" + def __init__(self, num_features): super(Linreg, self).__init__() self.matmul = P.MatMul() @@ -84,8 +93,10 @@ class Linreg(nn.Cell): def construct(self, x): return self.matmul(x, self.w) + self.b + class Model: """Simplified model.""" + def __init__(self, network, loss_fn, optimizer): self.optimizer = optimizer self.step = nn.TrainOneStepCell(nn.WithLossCell(network, loss_fn), self.optimizer) diff --git a/tests/mindspore_test_framework/utils/npy_util.py b/tests/mindspore_test_framework/utils/npy_util.py index d44c562f6d..b27b757537 100644 --- a/tests/mindspore_test_framework/utils/npy_util.py +++ b/tests/mindspore_test_framework/utils/npy_util.py @@ -22,6 +22,7 @@ from mindspore.common.tensor import Tensor from .other_util import shape2tensor from .config_util import get_expect_config + def load_npy(p): s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance = get_expect_config(p) if isinstance(s, str): @@ -33,6 +34,7 @@ def load_npy(p): ret = shape2tensor(s, dtype, scale) return ret, max_error, check_tolerance, relative_tolerance, absolute_tolerance + def load_data_from_npy_or_shape(dpaths, 
skip_expect_config=True): ret = [] for p in dpaths: diff --git a/tests/mindspore_test_framework/utils/other_util.py b/tests/mindspore_test_framework/utils/other_util.py index 7ded357c02..fb66519e5a 100644 --- a/tests/mindspore_test_framework/utils/other_util.py +++ b/tests/mindspore_test_framework/utils/other_util.py @@ -20,11 +20,13 @@ import numpy as np import mindspore._c_expression as _c_expression from mindspore.common.tensor import Tensor + def wrap(x): if isinstance(x, (tuple, list)): return x return (x,) + def to_numpy_list(tl): tl = wrap(tl) ret = [] @@ -35,11 +37,13 @@ def to_numpy_list(tl): ret.append(x) return ret + def to_numpy(x): if isinstance(x, (Tensor, _c_expression.Tensor)): return x.asnumpy() return x + def shape2tensor(shp, dtype=np.float32, scale=6): if isinstance(shp, list): if not shp: @@ -47,11 +51,12 @@ def shape2tensor(shp, dtype=np.float32, scale=6): return Tensor((np.random.rand(*shp) * scale).astype(dtype)) return shp + def select_from_config_tuple(t, index, default): if not isinstance(t, tuple): return default if not isinstance(t[-1], dict): return default - if index > len(t)-1: + if index > len(t) - 1: return default return t[index] diff --git a/tests/mindspore_test_framework/utils/verifier_util.py b/tests/mindspore_test_framework/utils/verifier_util.py index 2084b1e791..989251450d 100644 --- a/tests/mindspore_test_framework/utils/verifier_util.py +++ b/tests/mindspore_test_framework/utils/verifier_util.py @@ -17,6 +17,7 @@ import numpy as np + def count_unequal_element(expect, result, rtol, atol): """ Count unequal element. @@ -33,15 +34,16 @@ def count_unequal_element(expect, result, rtol, atol): raise ValueError(f'expect.shape {expect.shape}, result.shape {result.shape}') total_count = len(expect.flatten()) error = np.abs(expect - result) - count = np.count_nonzero(np.less_equal(error, atol + np.abs(result)*rtol)) - if ((total_count-count)/total_count) >= rtol: + count = np.count_nonzero(np.less_equal(error, atol + np.abs(result) * rtol)) + if ((total_count - count) / total_count) >= rtol: raise ValueError(f'expect {expect}, but got {result}, ' - f'{total_count-count} / {total_count} elements out of tolerance, ' + f'{total_count - count} / {total_count} elements out of tolerance, ' f'absolute_tolerance {atol}, relative_tolerance {rtol}') print(f'expect {expect}, got {result}, ' - f'{total_count-count} / {total_count} elements out of tolerance, ' + f'{total_count - count} / {total_count} elements out of tolerance, ' f'absolute_tolerance {atol}, relative_tolerance {rtol}') + def tolerance_assert(expect, result, rtol, atol): """ Verify if results are in expected tolerance. 
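The whitespace fixes above leave count_unequal_element's tolerance rule unchanged: an element passes when |expect - result| <= atol + |result| * rtol. A self-contained NumPy sketch of that predicate (the function name and sample values are mine, for illustration only):

    import numpy as np

    def within_tolerance(expect, result, rtol, atol):
        # Element-wise |expect - result| <= atol + |result| * rtol, the same
        # predicate count_unequal_element counts above.
        error = np.abs(expect - result)
        return np.less_equal(error, atol + np.abs(result) * rtol)

    expect = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    result = np.array([1.0, 2.0001, 3.1], dtype=np.float32)
    mask = within_tolerance(expect, result, rtol=1e-3, atol=1e-3)
    print(mask)  # [ True  True False]: the error 0.1 exceeds 1e-3 + 3.1 * 1e-3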
diff --git a/tests/ops_common.py b/tests/ops_common.py index e993266496..7278d7da4f 100644 --- a/tests/ops_common.py +++ b/tests/ops_common.py @@ -21,8 +21,10 @@ import mindspore.ops.operations as P from mindspore import Tensor from mindspore.common.api import _executor + class InputBackward(nn.Cell): """ InputBackward definition """ + def __init__(self, network, c1=None, c2=None): super(InputBackward, self).__init__() self.network = network @@ -58,6 +60,7 @@ class InputBackward(nn.Cell): class InputOpNet(nn.Cell): """ InputOpNet definition """ + def __init__(self, op, get_first=False, c1=None, c2=None, c3=None, c4=None): super(InputOpNet, self).__init__() @@ -76,6 +79,7 @@ class InputOpNet(nn.Cell): if self.get_first: x = x[0] return x + def construct0_c1_fack(self, data): x = self.op(self.c1) + data if self.get_first: @@ -148,7 +152,6 @@ class InputOpNet(nn.Cell): x = x[0] return x - def construct2_c1(self, x1, x2): x = self.op(x1, x2, self.c1) if self.get_first: @@ -203,8 +206,10 @@ class InputOpNet(nn.Cell): x = x[0] return x + class NetOutputAsLoss(nn.Cell): """ NetOutputAsLoss definition """ + def __init__(self, network, output_index): super(NetOutputAsLoss, self).__init__() self.network = network @@ -233,18 +238,21 @@ class NetOutputAsLoss(nn.Cell): predict = self.network(x1, x2, x3, x4, x5)[self.output_index] return predict + def get_loss_fun(construct_net, num_input, output_index): net = NetOutputAsLoss(construct_net, output_index) f = getattr(net, 'construct%d' % num_input) setattr(net, "construct", f) return net + def build_construct_graph(net, *inputs, execute=True): net.set_train() _executor.compile(net, *inputs) if execute: _executor(net, inputs) + def build_backward_graph(net, output_shapes, inputs, execute=True): inputs = append_sens_to_inputs(output_shapes, inputs) net = gen_backward_net(net, len(inputs) - 1) @@ -253,6 +261,7 @@ def build_backward_graph(net, output_shapes, inputs, execute=True): if execute: _executor(net, inputs) + def convert(shp, dtype=np.float32, scale=6): if isinstance(shp, list): if not shp: @@ -260,12 +269,14 @@ def convert(shp, dtype=np.float32, scale=6): return Tensor((np.random.rand(*shp) * scale).astype(dtype)) return shp + def gen_inputs(input_shapes, config): add_fack_input = config.get('add_fack_input', False) if not input_shapes and add_fack_input: return [Tensor(np.array([1.0]).astype(config.get('fack_input_type', np.float32)))] return [convert(shp) for shp in input_shapes] + def gen_backward_inputs(input_shapes, output_shapes, config): add_fack_input = config.get('add_fack_input', False) if not input_shapes and add_fack_input: @@ -276,11 +287,13 @@ def gen_backward_inputs(input_shapes, output_shapes, config): sens = convert(sens_shape) return inputs + [sens] + def append_sens_to_inputs(output_shapes, inputs): inputs = inputs sens = Tensor(np.random.normal(0, 1, output_shapes).astype(np.float32)) return inputs + [sens] + def gen_net(shapes, config, get_first=False): """ gen_net function @@ -313,14 +326,17 @@ def gen_backward_net(construct_net, input_num): setattr(net, "construct", f) return net + def batch_tuple_tensor(data, batch_size): ret = [Tensor(np.tile(d.asnumpy(), (batch_size, 1))) for d in data] return tuple(ret) + class OutPutWrap(nn.Cell): """ OutPutWrap definition """ + def __init__(self, network, num_output, output_is_tuple): super(OutPutWrap, self).__init__() self.network = network @@ -387,6 +403,7 @@ class OutPutWrap(nn.Cell): ret = ret + F.make_tuple(predict[i] * self.cast(self.one, self.dtype(predict[i]))) return ret + def 
get_output_wrap(network, num_input, num_output, output_is_tuple=0): net = OutPutWrap(network, num_output, output_is_tuple) f = getattr(net, 'construct%d' % num_input) diff --git a/tests/st/auto_parallel/test_expand_loss.py b/tests/st/auto_parallel/test_expand_loss.py index e89ee5d3c8..ed309ee2b8 100644 --- a/tests/st/auto_parallel/test_expand_loss.py +++ b/tests/st/auto_parallel/test_expand_loss.py @@ -23,4 +23,4 @@ import pytest def test_expand_loss(): sh_path = os.path.split(os.path.realpath(__file__))[0] ret = os.system(f"sh {sh_path}/run_auto_parallel_loss_expand.sh") - assert(ret == 0) + assert (ret == 0) diff --git a/tests/st/auto_parallel/test_model_parallel_onehot.py b/tests/st/auto_parallel/test_model_parallel_onehot.py index 55217421a4..f0ad888ecc 100644 --- a/tests/st/auto_parallel/test_model_parallel_onehot.py +++ b/tests/st/auto_parallel/test_model_parallel_onehot.py @@ -19,4 +19,4 @@ import pytest def test_expand_loss(): ret = os.system("sh run_onehot_model_parallel.sh") - assert(ret == 0) + assert (ret == 0) diff --git a/tests/st/control/test_cont_break.py b/tests/st/control/test_cont_break.py index 124ee3efa6..dafac487c2 100644 --- a/tests/st/control/test_cont_break.py +++ b/tests/st/control/test_cont_break.py @@ -18,6 +18,7 @@ import numpy as np from mindspore.nn import Cell from mindspore import Tensor, Model, context + def run_test(netclass, count, dev): context.set_context(mode=context.GRAPH_MODE, device_target=dev) net = netclass() @@ -25,10 +26,11 @@ def run_test(netclass, count, dev): for _ in range(count): input_np = np.random.randn(2, 3).astype(np.float32) input_ms = Tensor(input_np) - output_np = net.construct(input_np) # run python - output_ms = model.predict(input_ms) # run graph + output_np = net.construct(input_np) # run python + output_ms = model.predict(input_ms) # run graph np.testing.assert_array_almost_equal(output_np, output_ms.asnumpy(), decimal=3) + class for_loop_with_break(Cell): def __init__(self): super().__init__() @@ -42,6 +44,7 @@ class for_loop_with_break(Cell): pass return x + class for_loop_with_continue(Cell): def __init__(self): super().__init__() @@ -54,6 +57,7 @@ class for_loop_with_continue(Cell): x = x * 2 return x + class for_loop_with_cont_break(Cell): def __init__(self): super().__init__() @@ -71,6 +75,7 @@ class for_loop_with_cont_break(Cell): pass return x + class for_nested_loop_with_break(Cell): def __init__(self): super().__init__() @@ -84,6 +89,7 @@ class for_nested_loop_with_break(Cell): x = x * 1.5 return x + class while_with_break(Cell): def __init__(self): super().__init__() @@ -98,6 +104,7 @@ class while_with_break(Cell): i += 1 return x + class while_with_continue(Cell): def __init__(self): super().__init__() @@ -113,6 +120,7 @@ class while_with_continue(Cell): i += 1 return x + class while_for_nested(Cell): def __init__(self): super().__init__() @@ -131,6 +139,7 @@ class while_for_nested(Cell): i += 1 return x + class pass_branch(Cell): def __init__(self): super().__init__() @@ -145,6 +154,7 @@ class pass_branch(Cell): i += 1 return x + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard @@ -159,4 +169,3 @@ def test_cont_break(): run_test(while_with_continue, count, dev) run_test(while_for_nested, count, dev) run_test(pass_branch, count, dev) - diff --git a/tests/st/gnn/aggregator.py b/tests/st/gnn/aggregator.py index 5e208a2329..8dfe2d32e3 100644 --- a/tests/st/gnn/aggregator.py +++ b/tests/st/gnn/aggregator.py @@ -64,6 +64,7 @@ class GNNFeatureTransform(nn.Cell): [[ 2.5246444 2.2738023 0.5711005 
-3.9399147 ] [ 1.0739875 4.0155234 0.94188046 -5.459526 ]] """ + @cell_attr_register def __init__(self, in_channels, @@ -78,7 +79,7 @@ class GNNFeatureTransform(nn.Cell): if isinstance(weight_init, Tensor): if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \ - weight_init.shape()[1] != in_channels: + weight_init.shape()[1] != in_channels: raise ValueError("weight_init shape error") self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight") @@ -104,7 +105,7 @@ class GNNFeatureTransform(nn.Cell): def extend_repr(self): str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \ - .format(self.in_channels, self.out_channels, self.weight, self.has_bias) + .format(self.in_channels, self.out_channels, self.weight, self.has_bias) if self.has_bias: str_info = str_info + ', bias={}'.format(self.bias) @@ -136,6 +137,7 @@ class _BaseAggregator(nn.Cell): >>> def construct(self, x): >>> return self.reduce_mean(x, 1) """ + def __init__(self, feature_in_dim, feature_out_dim, @@ -191,6 +193,7 @@ class MeanAggregator(_BaseAggregator): >>> input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtypy=np.float32)) >>> output = net(input_data) """ + def __init__(self, feature_in_dim, feature_out_dim, @@ -349,6 +352,7 @@ class AttentionAggregator(nn.Cell): 8) >>> net(input_data, biases) """ + def __init__(self, in_channels, out_channels, diff --git a/tests/st/gnn/test_gnn_aggregator.py b/tests/st/gnn/test_gnn_aggregator.py index 6335b4c832..c244b3cb8a 100644 --- a/tests/st/gnn/test_gnn_aggregator.py +++ b/tests/st/gnn/test_gnn_aggregator.py @@ -27,6 +27,7 @@ context.set_context(mode=context.GRAPH_MODE) class MeanAggregatorGrad(nn.Cell): """Backward of MeanAggregator""" + def __init__(self, network): super(MeanAggregatorGrad, self).__init__() self.grad_op = C.grad_all_with_sens diff --git a/tests/st/nccl/test_nccl_all.py b/tests/st/nccl/test_nccl_all.py index faa6394f9a..2ad160fab3 100644 --- a/tests/st/nccl/test_nccl_all.py +++ b/tests/st/nccl/test_nccl_all.py @@ -21,7 +21,7 @@ import pytest @pytest.mark.env_single def test_nccl_lenet(): return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py") - assert(return_code == 0) + assert (return_code == 0) @pytest.mark.level0 @@ -29,7 +29,7 @@ def test_nccl_lenet(): @pytest.mark.env_single def test_nccl_all_reduce_op(): return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py") - assert(return_code == 0) + assert (return_code == 0) @pytest.mark.level0 @@ -37,7 +37,7 @@ def test_nccl_all_reduce_op(): @pytest.mark.env_single def test_nccl_all_gather_op(): return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py") - assert(return_code == 0) + assert (return_code == 0) @pytest.mark.level0 @@ -45,4 +45,4 @@ def test_nccl_all_gather_op(): @pytest.mark.env_single def test_nccl_reduce_scatter_op(): return_code = os.system("mpirun -n 8 pytest -s test_nccl_reduce_scatter_op.py") - assert(return_code == 0) + assert (return_code == 0) diff --git a/tests/st/nccl/test_nccl_lenet.py b/tests/st/nccl/test_nccl_lenet.py index 4ed424b6ee..ae15a9e3ac 100644 --- a/tests/st/nccl/test_nccl_lenet.py +++ b/tests/st/nccl/test_nccl_lenet.py @@ -71,7 +71,7 @@ class LeNet(nn.Cell): def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32): lr = [] for step in range(total_steps): - lr_ = base_lr * gamma ** (step//gap) + lr_ = base_lr * gamma ** (step // gap) lr.append(lr_) return Tensor(np.array(lr), dtype) @@ -104,4 +104,4 @@ def test_lenet_nccl(): with 
open("ms_loss.txt", "w") as fo2: fo2.write("loss:") fo2.write(str(losses[-5:])) - assert(losses[-1] < 0.01) + assert (losses[-1] < 0.01) diff --git a/tests/st/nccl/test_nccl_reduce_scatter_op.py b/tests/st/nccl/test_nccl_reduce_scatter_op.py index f3322d07a3..76f2022b78 100644 --- a/tests/st/nccl/test_nccl_reduce_scatter_op.py +++ b/tests/st/nccl/test_nccl_reduce_scatter_op.py @@ -20,6 +20,7 @@ from mindspore.ops import operations as P from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') init('nccl') diff --git a/tests/st/networks/models/bert/bert_tdt_lossscale.py b/tests/st/networks/models/bert/bert_tdt_lossscale.py index e6578af749..ee7e8281a7 100644 --- a/tests/st/networks/models/bert/bert_tdt_lossscale.py +++ b/tests/st/networks/models/bert/bert_tdt_lossscale.py @@ -30,10 +30,12 @@ from mindspore.train.loss_scale_manager import DynamicLossScaleManager from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepWithLossScaleCell from mindspore.nn.optim import Momentum from mindspore import log as logger + _current_dir = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"] SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json" + def get_config(version='base', batch_size=1): """get config""" if version == 'base': @@ -80,13 +82,14 @@ def get_config(version='base', batch_size=1): bert_config = BertConfig(batch_size=batch_size) return bert_config + def me_de_train_dataset(): """test me de train dataset""" # apply repeat operations repeat_count = 1 ds = de.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", - "next_sentence_labels", "masked_lm_positions", - "masked_lm_ids", "masked_lm_weights"], shuffle=False) + "next_sentence_labels", "masked_lm_positions", + "masked_lm_ids", "masked_lm_weights"], shuffle=False) type_cast_op = C.TypeCast(mstype.int32) ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) @@ -100,12 +103,14 @@ def me_de_train_dataset(): ds = ds.repeat(repeat_count) return ds + def weight_variable(shape): """weight variable""" np.random.seed(1) ones = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32) return Tensor(ones) + class ModelCallback(Callback): def __init__(self): super(ModelCallback, self).__init__() @@ -120,6 +125,7 @@ class ModelCallback(Callback): self.lossscale_list.append(cb_params.net_outputs[2].asnumpy()) print("epoch: {}, outputs are: {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs))) + @pytest.mark.level0 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @@ -134,8 +140,9 @@ def test_bert_tdt(): netwithloss = BertNetworkWithLoss(config, True) optimizer = Momentum(netwithloss.trainable_params(), learning_rate=2e-5, momentum=0.9) scale_window = 3 - scale_manager = DynamicLossScaleManager(2**16, 2, scale_window) - netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer, scale_update_cell=scale_manager.get_update_cell()) + scale_manager = DynamicLossScaleManager(2 ** 16, 2, scale_window) + netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer, + 
scale_update_cell=scale_manager.get_update_cell()) netwithgrads.set_train(True) model = Model(netwithgrads) callback = ModelCallback() @@ -162,10 +169,11 @@ def test_bert_tdt(): # assertion occurs while the loss value, overflow state or loss_scale value is wrong loss_value = np.array(callback.loss_list) - expect_loss_value = [12.1918125, 11.966035, 11.972114, 11.982189, 11.973948, 12.610932, 12.17564, 12.840248, 12.40294, 12.621653] + expect_loss_value = [12.1918125, 11.966035, 11.972114, 11.982189, 11.973948, 12.610932, 12.17564, 12.840248, + 12.40294, 12.621653] print("loss value: {}".format(loss_value)) assert np.allclose(loss_value, expect_loss_value, 0.00001, 0.00001) - + overflow = np.array(callback.overflow_list) expect_overflow = [True, True, False, False, False, True, False, False, False, True] print("overflow: {}".format(overflow)) @@ -176,5 +184,6 @@ def test_bert_tdt(): print("loss scale: {}".format(loss_scale)) assert np.allclose(loss_scale, expect_loss_scale, 0.00001, 0.00001) + if __name__ == '__main__': test_bert_tdt() diff --git a/tests/st/networks/test_gpu_alexnet.py b/tests/st/networks/test_gpu_alexnet.py index 699617b384..2693192381 100644 --- a/tests/st/networks/test_gpu_alexnet.py +++ b/tests/st/networks/test_gpu_alexnet.py @@ -42,7 +42,7 @@ class AlexNet(nn.Cell): self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid") self.flatten = nn.Flatten() - self.fc1 = nn.Dense(6*6*256, 4096) + self.fc1 = nn.Dense(6 * 6 * 256, 4096) self.fc2 = nn.Dense(4096, 4096) self.fc3 = nn.Dense(4096, num_classes) @@ -87,4 +87,4 @@ def test_trainTensor(num_classes=10, epoch=15, batch_size=32): label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) - assert(losses[-1].asnumpy() < 0.01) + assert (losses[-1].asnumpy() < 0.01) diff --git a/tests/st/networks/test_gpu_lstm.py b/tests/st/networks/test_gpu_lstm.py index acf5ca9396..a5fe438745 100644 --- a/tests/st/networks/test_gpu_lstm.py +++ b/tests/st/networks/test_gpu_lstm.py @@ -25,7 +25,6 @@ from mindspore.nn import Dense from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter - context.set_context(mode=context.GRAPH_MODE, device_target="GPU") @@ -104,6 +103,8 @@ class SentimentNet(nn.Cell): batch_size = 64 + + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -117,9 +118,9 @@ def test_LSTM(): vocab_size = 252193 max_len = 500 - weight = np.ones((vocab_size+1, embed_size)).astype(np.float32) + weight = np.ones((vocab_size + 1, embed_size)).astype(np.float32) - net = SentimentNet(vocab_size=(vocab_size+1), embed_size=embed_size, + net = SentimentNet(vocab_size=(vocab_size + 1), embed_size=embed_size, num_hiddens=num_hiddens, num_layers=num_layers, bidirectional=bidirectional, weight=weight, labels=labels, batch_size=batch_size) @@ -140,4 +141,4 @@ def test_LSTM(): loss = train_network(train_features, train_labels) losses.append(loss) print("loss:", loss.asnumpy()) - assert(losses[-1].asnumpy() < 0.01) + assert (losses[-1].asnumpy() < 0.01) diff --git a/tests/st/networks/test_gpu_resnet.py b/tests/st/networks/test_gpu_resnet.py index afae359b4e..906bdbb2d0 100644 --- a/tests/st/networks/test_gpu_resnet.py +++ b/tests/st/networks/test_gpu_resnet.py @@ -340,7 +340,8 @@ def test_trainTensor(num_classes=10, epoch=8, batch_size=1): label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) - assert(losses[-1].asnumpy() < 1) 
+ assert (losses[-1].asnumpy() < 1) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @@ -359,6 +360,6 @@ def test_trainTensor_amp(num_classes=10, epoch=18, batch_size=16): label = Tensor(np.ones([batch_size]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) - assert(losses[-1][0].asnumpy() < 1) - assert(losses[-1][1].asnumpy() == False) - assert(losses[-1][2].asnumpy() > 1) + assert (losses[-1][0].asnumpy() < 1) + assert (losses[-1][1].asnumpy() == False) + assert (losses[-1][2].asnumpy() > 1) diff --git a/tests/st/ops/cpu/test_argmax_op.py b/tests/st/ops/cpu/test_argmax_op.py index b448bb3807..b3d8fed466 100644 --- a/tests/st/ops/cpu/test_argmax_op.py +++ b/tests/st/ops/cpu/test_argmax_op.py @@ -25,27 +25,27 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + class NetArgmax(nn.Cell): - def __init__( self): + def __init__(self): super(NetArgmax, self).__init__() self.argmax = P.Argmax(output_type=mstype.int32) x = Tensor(np.array([[1., 20., 5.], [67., 8., 9.], [130., 24., 15.]]).astype(np.float32)) - self.x = Parameter(initializer(x, x.shape()), name ='x') + self.x = Parameter(initializer(x, x.shape()), name='x') def construct(self): return self.argmax(self.x) + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_argmax(): - Argmax = NetArgmax() output = Argmax() print("================================") - expect = np.array([1,0,0]).astype(np.float32) + expect = np.array([1, 0, 0]).astype(np.float32) print(output) assert (output.asnumpy() == expect).all() - diff --git a/tests/st/ops/cpu/test_bias_add.py b/tests/st/ops/cpu/test_bias_add.py index 2a4b8622bb..11e7cd3662 100644 --- a/tests/st/ops/cpu/test_bias_add.py +++ b/tests/st/ops/cpu/test_bias_add.py @@ -18,8 +18,10 @@ from mindspore.ops import operations as P import mindspore.nn as nn import numpy as np import mindspore.context as context + context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,26 +30,28 @@ class Net(nn.Cell): def construct(self, x, b): return self.bias_add(x, b) + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_bias_add1(): - x = np.ones([2,3,4,4]).astype(np.float32) - b = np.array([1,1,1]).astype(np.float32) + x = np.ones([2, 3, 4, 4]).astype(np.float32) + b = np.array([1, 1, 1]).astype(np.float32) bias_add = Net() output = bias_add(Tensor(x), Tensor(b)) - expect_output = np.ones([2,3,4,4]).astype(np.float32)*2 + expect_output = np.ones([2, 3, 4, 4]).astype(np.float32) * 2 print(output) - assert np.all(output.asnumpy()==expect_output), "bias_add execute failed, please check current code commit" + assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit" + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_bias_add2(): - x = np.ones([2,3]).astype(np.float32) - b = np.array([1,1,1]).astype(np.float32) + x = np.ones([2, 3]).astype(np.float32) + b = np.array([1, 1, 1]).astype(np.float32) bias_add = Net() output = bias_add(Tensor(x), Tensor(b)) - expect_output = np.ones([2,3]).astype(np.float32)*2 + expect_output = np.ones([2, 3]).astype(np.float32) * 2 print(output) - assert np.all(output.asnumpy()==expect_output), "bias_add execute failed, please check current code commit" + assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit" 
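For reference, what the two bias_add cases above assert can be reproduced in plain NumPy by broadcasting the bias over the channel axis; this is a reference model of the expected output, not P.BiasAdd's actual kernel:

    import numpy as np

    def bias_add_reference(x, b):
        # Broadcast bias b over axis 1 (the channel axis of NCHW or 2-D inputs).
        shape = [1, -1] + [1] * (x.ndim - 2)
        return x + b.reshape(shape)

    b = np.array([1, 1, 1], dtype=np.float32)
    # Mirrors test_bias_add1: all-ones NCHW input plus unit bias gives all twos.
    assert np.all(bias_add_reference(np.ones([2, 3, 4, 4], np.float32), b) == 2)
    # Mirrors test_bias_add2 with a 2-D input.
    assert np.all(bias_add_reference(np.ones([2, 3], np.float32), b) == 2)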
diff --git a/tests/st/ops/cpu/test_bias_add_grad.py b/tests/st/ops/cpu/test_bias_add_grad.py index 01dcdae690..01253fd03b 100644 --- a/tests/st/ops/cpu/test_bias_add_grad.py +++ b/tests/st/ops/cpu/test_bias_add_grad.py @@ -19,8 +19,10 @@ from mindspore.ops.operations import _grad_ops as G import mindspore.nn as nn import numpy as np import mindspore.context as context + context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -29,24 +31,26 @@ class Net(nn.Cell): def construct(self, dout): return self.bias_add_grad(dout) + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_bias_add_grad1(): - dout = np.ones([2,3]).astype(np.float32) + dout = np.ones([2, 3]).astype(np.float32) bias_add_grad = Net() output = bias_add_grad(Tensor(dout)) - expect_output = np.array([2.,2.,2.]).astype(np.float32) + expect_output = np.array([2., 2., 2.]).astype(np.float32) print(output.asnumpy()) - assert np.all(output.asnumpy()==expect_output), "bias_add_grad execute failed, please check current code commit" + assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit" + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_bias_add_grad2(): - dout = np.ones([2,3,4,4]).astype(np.float32) + dout = np.ones([2, 3, 4, 4]).astype(np.float32) bias_add_grad = Net() output = bias_add_grad(Tensor(dout)) - expect_output = np.array([32.,32.,32.]).astype(np.float32) + expect_output = np.array([32., 32., 32.]).astype(np.float32) print(output.asnumpy()) - assert np.all(output.asnumpy()==expect_output), "bias_add_grad execute failed, please check current code commit" + assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit" diff --git a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py index c2f8422e30..bde48446ac 100644 --- a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py +++ b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py @@ -25,32 +25,35 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class Net4(nn.Cell): def __init__(self): super(Net4, self).__init__() out_channel = 4 kernel_size = 1 self.conv_filter = G.Conv2DBackpropFilter(out_channel, - kernel_size, - pad_mode="valid", - pad=0, - mode=1, - stride=(1, 1), - dilation=(1, 1, 1, 1), - group=1) - self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w') + kernel_size, + pad_mode="valid", + pad=0, + mode=1, + stride=(1, 1), + dilation=(1, 1, 1, 1), + group=1) + self.w = Parameter( + initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), + name='w') self.x = Parameter(initializer(Tensor(np.array([[[ - [3, 0, 1, 2, 7, 4], - [1, 5, 8, 9, 3, 1], - [2, 7, 2, 5, 1, 3], - [0, 1, 3, 1, 7, 8], - [4, 2, 1, 6, 2, 8], - [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x') + [3, 0, 1, 2, 7, 4], + [1, 5, 8, 9, 3, 1], + [2, 7, 2, 5, 1, 3], + [0, 1, 3, 1, 7, 8], + [4, 2, 1, 6, 2, 8], + [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x') self.out = Parameter(initializer(Tensor(np.array([[[ - [ -5, -4, 0, 8], - [-10, -2, 2, 3], - [ 0, -2, -4, -7], - [ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y') + [-5, -4, 0, 8], + [-10, -2, 2, 3], + 
[0, -2, -4, -7], + [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y') self.get_shape = P.Shape() def construct(self): @@ -70,8 +73,8 @@ def test_conv2d_backprop_filter(): [-104, -211, -322] [-102, -144, -248]]]] """ - expect = np.array([[[[ -60, -142, -265], - [-104, -211, -322], - [-102, -144, -248]]]]).astype(np.float32) + expect = np.array([[[[-60, -142, -265], + [-104, -211, -322], + [-102, -144, -248]]]]).astype(np.float32) print(output) assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py index 2c0a5849af..952bef41e5 100644 --- a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py +++ b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py @@ -24,32 +24,35 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class Net5(nn.Cell): def __init__(self): super(Net5, self).__init__() out_channel = 4 kernel_size = 1 self.conv_input = P.Conv2DBackpropInput(out_channel, - kernel_size, - pad_mode="valid", - pad=0, - mode=1, - stride=1, - dilation=1, - group=1) - self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w') + kernel_size, + pad_mode="valid", + pad=0, + mode=1, + stride=1, + dilation=1, + group=1) + self.w = Parameter( + initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), + name='w') self.x = Parameter(initializer(Tensor(np.array([[[ [3, 0, 1, 2, 7, 4], [1, 5, 8, 9, 3, 1], [2, 7, 2, 5, 1, 3], [0, 1, 3, 1, 7, 8], [4, 2, 1, 6, 2, 8], - [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x') + [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x') self.out = Parameter(initializer(Tensor(np.array([[[ - [ -5, -4, 0, 8], - [-10, -2, 2, 3], - [ 0, -2, -4, -7], - [ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y') + [-5, -4, 0, 8], + [-10, -2, 2, 3], + [0, -2, -4, -7], + [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y') self.get_shape = P.Shape() def construct(self): @@ -72,11 +75,11 @@ def test_conv2d_backprop_input(): [ -3, -4, -4, -19, 7, 23] [ -3, -2, 0, -14, 3, 16]]]] """ - expect = np.array([[[[ -5, -4, 5, 12, 0, -8], - [-15, -6, 17, 17, -2, -11], - [-15, -8, 13, 12, 2, -4], - [-13, -6, 8, -14, 5, 20], - [ -3, -4, -4, -19, 7, 23], - [ -3, -2, 0, -14, 3, 16]]]]).astype(np.float32) + expect = np.array([[[[-5, -4, 5, 12, 0, -8], + [-15, -6, 17, 17, -2, -11], + [-15, -8, 13, 12, 2, -4], + [-13, -6, 8, -14, 5, 20], + [-3, -4, -4, -19, 7, 23], + [-3, -2, 0, -14, 3, 16]]]]).astype(np.float32) print(output) assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/cpu/test_conv2d_op.py b/tests/st/ops/cpu/test_conv2d_op.py index d298f2729b..061115e12a 100644 --- a/tests/st/ops/cpu/test_conv2d_op.py +++ b/tests/st/ops/cpu/test_conv2d_op.py @@ -24,8 +24,9 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class NetConv2d(nn.Cell): - def __init__( self): + def __init__(self): super(NetConv2d, self).__init__() out_channel = 2 kernel_size = 1 @@ -42,7 +43,6 @@ class NetConv2d(nn.Cell): self.x = Parameter(initializer( Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32)), [1, 3, 3, 3]), name='x') - def construct(self): return self.conv(self.x, self.w) @@ -64,9 +64,9 @@ def test_conv2d(): [162. 174. 186.] [198. 210. 
222.]]]] """ - expect = np.array([[[[ 45, 48, 51], - [ 54, 57, 60], - [ 63, 66, 69]], + expect = np.array([[[[45, 48, 51], + [54, 57, 60], + [63, 66, 69]], [[126, 138, 150], [162, 174, 186], [198, 210, 222]]]]).astype(np.float32) diff --git a/tests/st/ops/cpu/test_equalcount_op.py b/tests/st/ops/cpu/test_equalcount_op.py index 6ad33c5d6a..5d16008a2b 100644 --- a/tests/st/ops/cpu/test_equalcount_op.py +++ b/tests/st/ops/cpu/test_equalcount_op.py @@ -24,14 +24,15 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + class NetEqualCount(nn.Cell): - def __init__( self): + def __init__(self): super(NetEqualCount, self).__init__() self.equalcount = P.EqualCount() x = Tensor(np.array([1, 20, 5]).astype(np.int32)) y = Tensor(np.array([2, 20, 5]).astype(np.int32)) - self.x = Parameter(initializer(x, x.shape()), name ='x') - self.y = Parameter(initializer(y, y.shape()), name ='y') + self.x = Parameter(initializer(x, x.shape()), name='x') + self.y = Parameter(initializer(y, y.shape()), name='y') def construct(self): return self.equalcount(self.x, self.y) @@ -41,11 +42,9 @@ class NetEqualCount(nn.Cell): @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_equalcount(): - EqualCount = NetEqualCount() output = EqualCount() print("================================") expect = np.array([2]).astype(np.int32) print(output) assert (output.asnumpy() == expect).all() - diff --git a/tests/st/ops/cpu/test_maxpool_grad_op.py b/tests/st/ops/cpu/test_maxpool_grad_op.py index bf1b4d6eda..ed2a699bca 100644 --- a/tests/st/ops/cpu/test_maxpool_grad_op.py +++ b/tests/st/ops/cpu/test_maxpool_grad_op.py @@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE, device_target='CPU') + class Net_Pool_Grad(nn.Cell): def __init__(self): super(Net_Pool_Grad, self).__init__() @@ -56,7 +57,6 @@ class Net_Pool_Grad(nn.Cell): [31, 33, 35] ]]]).astype(np.float32)), [1, 1, 3, 3]), name='d') - def construct(self): return self.maxpool_grad_fun(self.x, self.a, self.d) @@ -78,4 +78,3 @@ def test_maxpool2d_grad(): [0, 31, 0, 33, 0, 35] ]]])) assert (output.asnumpy() == expect_result).all() - diff --git a/tests/st/ops/cpu/test_maxpool_op.py b/tests/st/ops/cpu/test_maxpool_op.py index 3d06250502..c9240dba67 100644 --- a/tests/st/ops/cpu/test_maxpool_op.py +++ b/tests/st/ops/cpu/test_maxpool_op.py @@ -21,20 +21,25 @@ import mindspore.context as context context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + class Net_Pool(nn.Cell): def __init__(self): super(Net_Pool, self).__init__() self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID") + def construct(self, x): return self.maxpool_fun(x) + class Net_Pool2(nn.Cell): def __init__(self): super(Net_Pool2, self).__init__() self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME") + def construct(self, x): return self.maxpool_fun(x) + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard diff --git a/tests/st/ops/cpu/test_momentum_op.py b/tests/st/ops/cpu/test_momentum_op.py index 63c758fb7d..445fe4e38e 100644 --- a/tests/st/ops/cpu/test_momentum_op.py +++ b/tests/st/ops/cpu/test_momentum_op.py @@ -25,6 +25,7 @@ import mindspore.context as context context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + class MomentumNet(nn.Cell): def __init__(self): super(MomentumNet, self).__init__() @@ -39,6 +40,7 @@ class MomentumNet(nn.Cell): output = self.fc1(output) return output + 
@pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard @@ -46,7 +48,7 @@ def test_momentum(): epoch = 13 net = MomentumNet() learning_rate = 0.1 - momentum = 0.9 + momentum = 0.9 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum) criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) @@ -55,11 +57,11 @@ def test_momentum(): train_network.set_train() losses = [] for i in range(epoch): - data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32)*0.01) + data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01) label = Tensor(np.array([0]).astype(np.int32)) loss = train_network(data, label) losses.append(loss) - + print("================================") print(losses) """ diff --git a/tests/st/ops/cpu/test_mul_op.py b/tests/st/ops/cpu/test_mul_op.py index 98e2518198..c5051fe92a 100644 --- a/tests/st/ops/cpu/test_mul_op.py +++ b/tests/st/ops/cpu/test_mul_op.py @@ -1,17 +1,17 @@ -#Copyright 2019 Huawei Technologies Co., Ltd +# Copyright 2019 Huawei Technologies Co., Ltd # -#Licensed under the Apache License, Version 2.0(the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -#http: // www.apache.org/licenses/LICENSE-2.0 +# http: // www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -#== == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
 import pytest
 from mindspore import Tensor
@@ -23,13 +23,14 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 
-x = np.random.uniform(-2, 2, (2,3,4,4)).astype(np.float32)
-y = np.random.uniform(-2, 2, (1,1,1,1)).astype(np.float32)
+x = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
+y = np.random.uniform(-2, 2, (1, 1, 1, 1)).astype(np.float32)
 context.set_context(device_target='CPU')
 
+
 class Net(nn.Cell):
-    def __init__( self):
+    def __init__(self):
         super(Net, self).__init__()
         self.mul = P.Mul()
         self.x = Parameter(initializer(Tensor(x), x.shape), name='x3')
@@ -39,6 +40,7 @@ class Net(nn.Cell):
     def construct(self):
         return self.mul(self.x, self.y)
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
diff --git a/tests/st/ops/cpu/test_relu_grad_op.py b/tests/st/ops/cpu/test_relu_grad_op.py
index 05823cf3d6..a4b2bb98c5 100644
--- a/tests/st/ops/cpu/test_relu_grad_op.py
+++ b/tests/st/ops/cpu/test_relu_grad_op.py
@@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
+
 class NetReluGrad(nn.Cell):
     def __init__(self):
         super(NetReluGrad, self).__init__()
@@ -35,16 +36,18 @@ class NetReluGrad(nn.Cell):
         self.dy = Parameter(initializer(Tensor(np.array([[[[1, 0, 1], [0, 1, 0], [1, 1, 1]]]]).astype(np.float32)),
                                         [1, 1, 3, 3]), name='dy')
+
     def construct(self):
         return self.rekuGrad(self.dy, self.x)
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_relu_grad():
     relu_grad = NetReluGrad()
     output = relu_grad()
-    expect = np.array([[[ [0, 0, 1,],[0, 0, 0,],[1, 1, 0.]]]]).astype(np.float32)
+    expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32)
     error = np.ones(shape=[3, 3]) * 1.0e-6
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
diff --git a/tests/st/ops/cpu/test_relu_op.py b/tests/st/ops/cpu/test_relu_op.py
index 648ebcf5b9..9ef3f0af06 100644
--- a/tests/st/ops/cpu/test_relu_op.py
+++ b/tests/st/ops/cpu/test_relu_op.py
@@ -24,6 +24,7 @@ from mindspore.common.parameter import Parameter
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
+
 class NetRelu(nn.Cell):
     def __init__(self):
         super(NetRelu, self).__init__()
@@ -31,17 +32,19 @@ class NetRelu(nn.Cell):
         self.x = Parameter(initializer(Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], [10, 1, -1]]]]).astype(np.float32)),
                                        [1, 1, 3, 3]), name='x')
+
     def construct(self):
         return self.relu(self.x)
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_relu():
     relu = NetRelu()
     output = relu()
-    expect = np.array([[[ [0, 1, 10,],
-                          [1, 0, 1,],
-                          [10, 1, 0.]]]]).astype(np.float32)
+    expect = np.array([[[[0, 1, 10, ],
+                         [1, 0, 1, ],
+                         [10, 1, 0.]]]]).astype(np.float32)
     print(output)
     assert (output.asnumpy() == expect).all()
diff --git a/tests/st/ops/cpu/test_softmax_op.py b/tests/st/ops/cpu/test_softmax_op.py
index 29b05ef3fb..2a360f55b2 100644
--- a/tests/st/ops/cpu/test_softmax_op.py
+++ b/tests/st/ops/cpu/test_softmax_op.py
@@ -24,18 +24,20 @@ from mindspore.common.parameter import Parameter
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
+
 class NetSoftmax(nn.Cell):
-    def __init__( self):
+    def __init__(self):
         super(NetSoftmax, self).__init__()
         self.softmax = P.Softmax()
         x = Tensor(np.array([[0.1, 0.3, 0.6],
                              [0.2, -0.6, 0.8],
                              [0.6, 1, 0.4]]).astype(np.float32))
-        self.x = Parameter(initializer(x, x.shape()), name ='x')
+        self.x = Parameter(initializer(x, x.shape()), name='x')
 
     def construct(self):
         return self.softmax(self.x)
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
@@ -49,4 +51,3 @@ def test_softmax():
     diff = np.abs(outputSum - expect)
     print(diff)
     assert np.all(diff < error)
-
diff --git a/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py b/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py
index 10dd2002b8..a6166b472b 100644
--- a/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py
+++ b/tests/st/ops/cpu/test_softmax_with_cross_entropy_op.py
@@ -24,15 +24,16 @@ from mindspore.common.parameter import Parameter
 
 context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
+
 class NetSoftmaxWithCrossEntropy(nn.Cell):
-    def __init__( self):
+    def __init__(self):
         super(NetSoftmaxWithCrossEntropy, self).__init__()
-        logits = Tensor(np.array([[1,1,10],
-                                  [1,10,1],
-                                  [10,1,1]]).astype(np.float32))
-        self.logits = Parameter(initializer(logits, logits.shape()), name ='logits')
-        labels = Tensor(np.array([2,1,0]).astype(np.int32))
-        self.labels = Parameter(initializer(labels, labels.shape()), name ='labels')
+        logits = Tensor(np.array([[1, 1, 10],
+                                  [1, 10, 1],
+                                  [10, 1, 1]]).astype(np.float32))
+        self.logits = Parameter(initializer(logits, logits.shape()), name='logits')
+        labels = Tensor(np.array([2, 1, 0]).astype(np.int32))
+        self.labels = Parameter(initializer(labels, labels.shape()), name='labels')
         self.SoftmaxWithCrossEntropy = P.SparseSoftmaxCrossEntropyWithLogits(True)
 
     def construct(self):
@@ -45,9 +46,9 @@ class NetSoftmaxWithCrossEntropy(nn.Cell):
 def test_net():
     SoftmaxWithCrossEntropy = NetSoftmaxWithCrossEntropy()
     output = SoftmaxWithCrossEntropy()
-
expect = np.array([[ 4.1126452e-05, 4.1126452e-05, -8.2234539e-05], - [ 4.1126452e-05, -8.2234539e-05, 4.1126452e-05], - [-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32) + expect = np.array([[4.1126452e-05, 4.1126452e-05, -8.2234539e-05], + [4.1126452e-05, -8.2234539e-05, 4.1126452e-05], + [-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32) print(output) error = np.ones(shape=[3, 3]) * 1.0e-6 diff = output.asnumpy() - expect diff --git a/tests/st/ops/custom_ops_tbe/conv2d.py b/tests/st/ops/custom_ops_tbe/conv2d.py index 266005f293..182b33ba7c 100755 --- a/tests/st/ops/custom_ops_tbe/conv2d.py +++ b/tests/st/ops/custom_ops_tbe/conv2d.py @@ -21,6 +21,8 @@ from topi.cce import util from te import platform as cce Nonetype = type(None) + + # pylint: disable=unused-argument, no-value-for-parameter, too-many-branches @fusion_manager.register("conv2d") def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations, @@ -103,6 +105,7 @@ def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations, return res + @util.check_input_type(dict, dict, (dict, Nonetype), dict, (tuple, list), (tuple, list), (tuple, list), str) def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations, @@ -189,7 +192,7 @@ def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations, if cce.CceProductParams().cce_product == "5.10": conv_layer_fast_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew, bias=use_bias, - kernel_name=kernel_name, need_build=True, need_print=False) + kernel_name=kernel_name, need_build=True, need_print=False) else: conv_layer_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew, diff --git a/tests/st/ops/custom_ops_tbe/conv_layer.py b/tests/st/ops/custom_ops_tbe/conv_layer.py index 3b3f9cdcf4..ed0293ad0d 100755 --- a/tests/st/ops/custom_ops_tbe/conv_layer.py +++ b/tests/st/ops/custom_ops_tbe/conv_layer.py @@ -18,14 +18,16 @@ from te.platform import CUBE_MKN from topi import generic from topi.cce import util from topi.cce.util import is_v200_version + # pylint: disable=R0912,R0913,R0914,R0915,E1101 # the dim of shape in conv must be 4 PAD_SHAPE_DIM = 2 NONETYPE = type(None) + @util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int), - int, int,(list, tuple), (list, tuple), + int, int, (list, tuple), (list, tuple), str, str, str, str, str, str, str, bool, str) @@ -57,9 +59,9 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p if quantize_config[0] == 0: if is_v200_version(): - util.check_dtype_rule(in_dtype, ('int8', )) - util.check_dtype_rule(w_dtype, ('int8', )) - util.check_dtype_rule(res_dtype, ('int32', )) + util.check_dtype_rule(in_dtype, ('int8',)) + util.check_dtype_rule(w_dtype, ('int8',)) + util.check_dtype_rule(res_dtype, ('int32',)) else: util.check_dtype_rule(in_dtype, ['float16']) util.check_dtype_rule(w_dtype, ['float16']) @@ -117,7 +119,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p if isinstance(padh, list): if len(padh) != PAD_SHAPE_DIM: - raise RuntimeError("Dimension must be %d when padh is a list."%PAD_SHAPE_DIM) + raise RuntimeError("Dimension must be %d when padh is a list." 
% PAD_SHAPE_DIM) pad_top = padh[0] pad_bottom = padh[1] else: @@ -126,7 +128,7 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p if isinstance(padw, list): if len(padw) != PAD_SHAPE_DIM: - raise RuntimeError("Dimension must be %d when padw is a list."%PAD_SHAPE_DIM) + raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM) pad_left = padw[0] pad_right = padw[1] else: @@ -134,8 +136,8 @@ def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, p pad_right = padw shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w, pad_top, pad_bottom, \ - pad_left, pad_right, strideh, \ - stridew, in_dtype, w_dtype, res_dtype) + pad_left, pad_right, strideh, \ + stridew, in_dtype, w_dtype, res_dtype) return shape_in, shape_w @@ -248,9 +250,12 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, shape_in = list(shape_in) shape_w = list(shape_w) - shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew, - quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype, scale_dq_dtype, - scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype, bias, kernel_name) + shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, + stridew, + quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype, + scale_dq_dtype, + scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype, + bias, kernel_name) # quantize switch on if quantize_config[0] == 1: @@ -338,7 +343,7 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, if is_quantize: scale_q = tvm.placeholder( (CUBE_MKN[scale_q_dtype]['mac'][1],), name='scaleQ', dtype=scale_q_dtype) - if quantize_algorithm ==1: + if quantize_algorithm == 1: offset_q = tvm.placeholder( (CUBE_MKN[offset_q_dtype]['mac'][1],), name='offsetQ', dtype=offset_q_dtype) @@ -353,13 +358,13 @@ def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, else (out_channel,) scale_rq = tvm.placeholder( scale_rq_shape, name='scaleRq', dtype=scale_rq_dtype) - if quantize_algorithm ==1: + if quantize_algorithm == 1: offset_rq_shape = (CUBE_MKN[offset_rq_dtype]['mac'][1],) offset_rq = tvm.placeholder( offset_rq_shape, name='offsetRq', dtype=offset_rq_dtype) # need offset_pad , for half offset - if quantize_algorithm ==1: + if quantize_algorithm == 1: offset_pad = tvm.placeholder( (CUBE_MKN[offset_pad_dtype]['mac'][1],), name='offset_pad', dtype=offset_pad_dtype) diff --git a/tests/st/ops/custom_ops_tbe/conv_layer_fast.py b/tests/st/ops/custom_ops_tbe/conv_layer_fast.py index 37b7d80424..93fe40fe70 100755 --- a/tests/st/ops/custom_ops_tbe/conv_layer_fast.py +++ b/tests/st/ops/custom_ops_tbe/conv_layer_fast.py @@ -17,12 +17,14 @@ from te import tvm from te.platform import CUBE_MKN from topi import generic from topi.cce import util + # pylint: disable=R0913,R0914,R0915,E1101 # the dim of shape in conv must be 4 PAD_SHAPE_DIM = 2 NoneType = type(None) + @util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int), int, int, bool, str) def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, @@ -40,7 +42,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty if isinstance(padh, list): if len(padh) != PAD_SHAPE_DIM: - raise RuntimeError("Dimension must be %d when padh is a list."%PAD_SHAPE_DIM) + raise 
RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM) pad_top = padh[0] pad_bottom = padh[1] else: @@ -49,7 +51,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty if isinstance(padw, list): if len(padw) != PAD_SHAPE_DIM: - raise RuntimeError("Dimension must be %d when padw is a list."%PAD_SHAPE_DIM) + raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM) pad_left = padw[0] pad_right = padw[1] else: @@ -62,6 +64,7 @@ def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dty return shape_in, shape_w + @util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int), int, int, bool, str, bool, bool) @@ -112,7 +115,7 @@ def conv_layer_fast_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, shape_w = list(shape_w) shape_in, shape_w = conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, - padh, padw, strideh, stridew, bias, kernel_name) + padh, padw, strideh, stridew, bias, kernel_name) batch_size = shape_in[0] in_channel = shape_in[1] diff --git a/tests/st/ops/custom_ops_tbe/cus_conv2d.py b/tests/st/ops/custom_ops_tbe/cus_conv2d.py index 120bf2e3e6..11b6b77169 100644 --- a/tests/st/ops/custom_ops_tbe/cus_conv2d.py +++ b/tests/st/ops/custom_ops_tbe/cus_conv2d.py @@ -20,6 +20,8 @@ from mindspore import Tensor from mindspore._checkparam import ParamValidator as validator from mindspore._checkparam import Rel, check_bool, check_int_positive, twice from mindspore.common import dtype as mstype + + class Cus_Conv2D(PrimitiveWithInfer): r""" Applies 2D convolution for the input. @@ -92,13 +94,13 @@ class Cus_Conv2D(PrimitiveWithInfer): validator.check_type('kernel_size', kernel_size, [int, tuple]) if isinstance(kernel_size, int) and kernel_size < 1: raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed ' - + str(self.kernel_size)+', should be a int or tuple and equal to or greater than 1.') + + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or kernel_size[0] < 1 or kernel_size[1] < 1): raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed ' - + str(self.kernel_size)+', should be a int or tuple and equal to or greater than 1.') + + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') self.stride = validator.check_integer('stride', stride, 1, Rel.GE) from .cus_conv2d_impl import Cus_Conv2D @@ -147,4 +149,4 @@ class Cus_Conv2D(PrimitiveWithInfer): def infer_dtype(self, x_dtype, w_dtype): args = {'x_dtype': x_dtype, 'w_dtype': w_dtype} validator.check_type_same(args, [mstype.int8, mstype.int32, mstype.float16, mstype.float32]) - return x_dtype \ No newline at end of file + return x_dtype diff --git a/tests/st/ops/custom_ops_tbe/cus_square.py b/tests/st/ops/custom_ops_tbe/cus_square.py index d006f75b4c..a0cfd1dd14 100644 --- a/tests/st/ops/custom_ops_tbe/cus_square.py +++ b/tests/st/ops/custom_ops_tbe/cus_square.py @@ -20,6 +20,7 @@ from mindspore import Tensor # y = x^2 class CusSquare(PrimitiveWithInfer): """CusSquare definition""" + @prim_attr_register def __init__(self): """init CusSquare""" diff --git a/tests/st/ops/custom_ops_tbe/test_cus_conv.py b/tests/st/ops/custom_ops_tbe/test_cus_conv.py index 4927417306..5205dd8b6b 100644 --- a/tests/st/ops/custom_ops_tbe/test_cus_conv.py +++ 
b/tests/st/ops/custom_ops_tbe/test_cus_conv.py @@ -20,31 +20,34 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from .cus_conv2d import Cus_Conv2D + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() out_channel = 64 kernel_size = 7 self.conv = Cus_Conv2D(out_channel, - kernel_size, - mode=1, - pad_mode="valid", - pad=0, - stride=1, - dilation=1, - group=1) + kernel_size, + mode=1, + pad_mode="valid", + pad=0, + stride=1, + dilation=1, + group=1) self.w = Parameter(initializer( 'normal', [64, 3, 7, 7]), name='w') - @ms_function def construct(self, x): return self.conv(x, self.w) + def test_net(): np.random.seed(3800) - x = np.random.randn(32,3,224,224).astype(np.float32) + x = np.random.randn(32, 3, 224, 224).astype(np.float32) conv = Net() output = conv(Tensor(x)) print(output.asnumpy()) diff --git a/tests/st/ops/custom_ops_tbe/test_square.py b/tests/st/ops/custom_ops_tbe/test_square.py index d8439000f8..189c710bbc 100644 --- a/tests/st/ops/custom_ops_tbe/test_square.py +++ b/tests/st/ops/custom_ops_tbe/test_square.py @@ -18,8 +18,10 @@ import mindspore.context as context from mindspore import Tensor from cus_square import CusSquare import pytest + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Net(nn.Cell): """Net definition""" @@ -30,6 +32,7 @@ class Net(nn.Cell): def construct(self, data): return self.square(data) + @pytest.mark.level0 @pytest.mark.platform_x86_ascend_training @pytest.mark.platform_arm_ascend_training @@ -40,5 +43,5 @@ def test_net(): output = square(Tensor(x)) print(x) print(output.asnumpy()) - expect = np.array([1.0,16.0,81.0]).astype(np.float32) - assert (output.asnumpy() == expect).all() \ No newline at end of file + expect = np.array([1.0, 16.0, 81.0]).astype(np.float32) + assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/gpu/test_addn_op.py b/tests/st/ops/gpu/test_addn_op.py index ce48e364fa..8dd6682f2d 100644 --- a/tests/st/ops/gpu/test_addn_op.py +++ b/tests/st/ops/gpu/test_addn_op.py @@ -33,6 +33,7 @@ class Net(nn.Cell): def construct(self, x, y, z): return self.add((x, y, z)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_argmax_op.py b/tests/st/ops/gpu/test_argmax_op.py index 8d5fd2fd7f..33e573c01c 100644 --- a/tests/st/ops/gpu/test_argmax_op.py +++ b/tests/st/ops/gpu/test_argmax_op.py @@ -21,11 +21,12 @@ from mindspore.common import dtype as mstype import mindspore.nn as nn import mindspore.context as context + class NetArgmax(nn.Cell): - def __init__( self): + def __init__(self): super(NetArgmax, self).__init__() axis1 = 0 - axis2 = -1 + axis2 = -1 self.argmax1 = P.Argmax(axis1, output_type=mstype.int32) self.argmax2 = P.Argmax(axis2, output_type=mstype.int32) self.argmax3 = P.Argmax(output_type=mstype.int32) @@ -33,27 +34,28 @@ class NetArgmax(nn.Cell): def construct(self, x): return (self.argmax1(x), self.argmax2(x), self.argmax3(x)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_argmax(): x = Tensor(np.array([[1., 20., 5.], - [67., 8., 9.], - [130., 24., 15.], - [0.3, -0.4, -15.]]).astype(np.float32)) - expect1 = np.array([2,2,2]).astype(np.int32) - expect2 = np.array([1,0,0,0]).astype(np.int32) + [67., 8., 9.], + [130., 24., 15.], + [0.3, -0.4, -15.]]).astype(np.float32)) + expect1 = np.array([2, 2, 2]).astype(np.int32) + expect2 
= np.array([1, 0, 0, 0]).astype(np.int32)
 
     context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
     Argmax = NetArgmax()
     output = Argmax(x)
     assert (output[0].asnumpy() == expect1).all()
     assert (output[1].asnumpy() == expect2).all()
-    assert (output[2].asnumpy() == expect2).all()
+    assert (output[2].asnumpy() == expect2).all()
 
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     Argmax1 = NetArgmax()
     output1 = Argmax1(x)
     assert (output1[0].asnumpy() == expect1).all()
     assert (output1[1].asnumpy() == expect2).all()
-    assert (output1[2].asnumpy() == expect2).all()
+    assert (output1[2].asnumpy() == expect2).all()
diff --git a/tests/st/ops/gpu/test_assign_add_op.py b/tests/st/ops/gpu/test_assign_add_op.py
index 4c95177fb6..93d9b7d53f 100644
--- a/tests/st/ops/gpu/test_assign_add_op.py
+++ b/tests/st/ops/gpu/test_assign_add_op.py
@@ -20,6 +20,7 @@ import mindspore.nn as nn
 import numpy as np
 import mindspore.context as context
 
+
 class AssignAdd(nn.Cell):
     def __init__(self, value):
         super(AssignAdd, self).__init__()
@@ -30,21 +31,22 @@ class AssignAdd(nn.Cell):
         res = self.add(self.var, y)
         return res
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_assign_add():
-    expect1 = np.array([[[[ 0, 2, 4.],
-                          [ 6, 8, 10.],
-                          [12, 14, 16.]],
-                         [[18, 20, 22.],
-                          [24, 26, 28.],
-                          [30, 32, 34.]],
-                         [[36, 38, 40.],
-                          [42, 44, 46.],
-                          [48, 50, 52.]]]])
-    expect2 = np.array([[[[ 0, 3, 6],
-                          [ 9, 12, 15],
+    expect1 = np.array([[[[0, 2, 4.],
+                          [6, 8, 10.],
+                          [12, 14, 16.]],
+                         [[18, 20, 22.],
+                          [24, 26, 28.],
+                          [30, 32, 34.]],
+                         [[36, 38, 40.],
+                          [42, 44, 46.],
+                          [48, 50, 52.]]]])
+    expect2 = np.array([[[[0, 3, 6],
+                          [9, 12, 15],
                           [18, 21, 24]],
                          [[27, 30, 33],
                           [36, 39, 42],
diff --git a/tests/st/ops/gpu/test_assign_op.py b/tests/st/ops/gpu/test_assign_op.py
index f1fb908268..f7bf5ebb94 100644
--- a/tests/st/ops/gpu/test_assign_op.py
+++ b/tests/st/ops/gpu/test_assign_op.py
@@ -30,9 +30,11 @@ class Net(nn.Cell):
     def construct(self, value):
         return self.assign(self.var, value)
 
+
 x = np.array([[1.2, 1], [1, 0]]).astype(np.float32)
 value = np.array([[1, 2], [3, 4.0]]).astype(np.float32)
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
diff --git a/tests/st/ops/gpu/test_batch_matmul.py b/tests/st/ops/gpu/test_batch_matmul.py
index ebf7bb397b..4eff96f412 100644
--- a/tests/st/ops/gpu/test_batch_matmul.py
+++ b/tests/st/ops/gpu/test_batch_matmul.py
@@ -24,6 +24,7 @@ import mindspore.nn as nn
 import mindspore.context as context
 from mindspore.common import dtype as mstype
 
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -35,6 +36,7 @@ class BatchMatMulNet(nn.Cell):
     def construct(self, x, y):
         return self.batch_matmul(x, y)
 
+
 def test_4D():
     input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
     input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
@@ -42,15 +44,15 @@ def test_4D():
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     net = BatchMatMulNet()
     output = net(input_x, input_y)
-    expect = [[[[ 20, 23, 26, 29]],
-               [[ 200, 212, 224, 236]],
-               [[ 596, 617, 638, 659]],
-               [[1208, 1238, 1268, 1298]]],
+    expect = [[[[20, 23, 26, 29]],
+               [[200, 212, 224, 236]],
+               [[596, 617, 638, 659]],
+               [[1208, 1238, 1268, 1298]]],
               [[[2036, 2075, 2114, 2153]],
-               [[3080, 3128, 3176, 3224]],
-               [[4340, 4397, 4454, 4511]],
-               [[5816, 5882, 5948, 6014]]]]
+               [[3080, 3128, 3176, 3224]],
+               [[4340, 4397, 4454, 4511]],
+               [[5816, 5882, 5948, 6014]]]]
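[Editor's note, not part of the patch: the expect literal in test_4D above can
be reproduced independently; BatchMatMul should agree with NumPy's matmul,
which broadcasts over the leading batch axes. A minimal cross-check mirroring
the test's arange inputs:

    import numpy as np

    x = np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3).astype(np.float32)
    y = np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4).astype(np.float32)
    out = np.matmul(x, y)  # batched over the leading (2, 4) axes
    assert out[0, 0].tolist() == [[20.0, 23.0, 26.0, 29.0]]  # first block of expect
]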
assert (output.asnumpy() == expect).all() @@ -58,21 +60,21 @@ def test_4D(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_4D_transpose_a(): - input_x = Tensor(np.arange(2*4*3*1).reshape(2,4,3,1), mstype.float32) - input_y = Tensor(np.arange(2*4*3*4).reshape(2,4,3,4), mstype.float32) + input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32) + input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = BatchMatMulNet(transpose_a=True) output = net(input_x, input_y) - expect = [[[[ 20, 23, 26, 29]], - [[ 200, 212, 224, 236]], - [[ 596, 617, 638, 659]], - [[1208, 1238, 1268, 1298]]], + expect = [[[[20, 23, 26, 29]], + [[200, 212, 224, 236]], + [[596, 617, 638, 659]], + [[1208, 1238, 1268, 1298]]], [[[2036, 2075, 2114, 2153]], - [[3080, 3128, 3176, 3224]], - [[4340, 4397, 4454, 4511]], - [[5816, 5882, 5948, 6014]]]] + [[3080, 3128, 3176, 3224]], + [[4340, 4397, 4454, 4511]], + [[5816, 5882, 5948, 6014]]]] assert (output.asnumpy() == expect).all() @@ -80,21 +82,21 @@ def test_4D_transpose_a(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_4D_transpose_b(): - input_x = Tensor(np.arange(2*4*1*3).reshape(2,4,1,3), mstype.float32) - input_y = Tensor(np.arange(2*4*4*3).reshape(2,4,4,3), mstype.float32) + input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32) + input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = BatchMatMulNet(transpose_b=True) output = net(input_x, input_y) - expect = [[[[ 5, 14, 23, 32]], - [[ 158, 194, 230, 266]], - [[ 527, 590, 653, 716]], - [[1112, 1202, 1292, 1382]]], + expect = [[[[5, 14, 23, 32]], + [[158, 194, 230, 266]], + [[527, 590, 653, 716]], + [[1112, 1202, 1292, 1382]]], [[[1913, 2030, 2147, 2264]], - [[2930, 3074, 3218, 3362]], - [[4163, 4334, 4505, 4676]], - [[5612, 5810, 6008, 6206]]]] + [[2930, 3074, 3218, 3362]], + [[4163, 4334, 4505, 4676]], + [[5612, 5810, 6008, 6206]]]] assert (output.asnumpy() == expect).all() @@ -102,23 +104,24 @@ def test_4D_transpose_b(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_4D_transpose_ab(): - input_x = Tensor(np.arange(2*4*3*1).reshape(2,4,3,1), mstype.float32) - input_y = Tensor(np.arange(2*4*4*3).reshape(2,4,4,3), mstype.float32) + input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32) + input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = BatchMatMulNet(transpose_a=True, transpose_b=True) output = net(input_x, input_y) - expect = [[[[ 5, 14, 23, 32]], - [[ 158, 194, 230, 266]], - [[ 527, 590, 653, 716]], - [[1112, 1202, 1292, 1382]]], + expect = [[[[5, 14, 23, 32]], + [[158, 194, 230, 266]], + [[527, 590, 653, 716]], + [[1112, 1202, 1292, 1382]]], [[[1913, 2030, 2147, 2264]], - [[2930, 3074, 3218, 3362]], - [[4163, 4334, 4505, 4676]], - [[5612, 5810, 6008, 6206]]]] + [[2930, 3074, 3218, 3362]], + [[4163, 4334, 4505, 4676]], + [[5612, 5810, 6008, 6206]]]] assert (output.asnumpy() == expect).all() + class BatchMatMulNet(nn.Cell): def __init__(self, transpose_a=False, transpose_b=False): super(BatchMatMulNet, self).__init__() @@ -127,6 +130,7 @@ class BatchMatMulNet(nn.Cell): def construct(self, x, y): return self.batch_matmul(x, y) + def test_4D_fp16(): input_x = Tensor(np.arange(2 * 4 * 1 * 
3).reshape(2, 4, 1, 3), mstype.float16) input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16) @@ -134,13 +138,13 @@ def test_4D_fp16(): context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = BatchMatMulNet() output = net(input_x, input_y) - expect = [[[[ 20, 23, 26, 29]], - [[ 200, 212, 224, 236]], - [[ 596, 617, 638, 659]], - [[1208, 1238, 1268, 1298]]], + expect = [[[[20, 23, 26, 29]], + [[200, 212, 224, 236]], + [[596, 617, 638, 659]], + [[1208, 1238, 1268, 1298]]], [[[2036, 2075, 2114, 2153]], - [[3080, 3128, 3176, 3224]], - [[4340, 4397, 4454, 4511]], - [[5816, 5882, 5948, 6014]]]] + [[3080, 3128, 3176, 3224]], + [[4340, 4397, 4454, 4511]], + [[5816, 5882, 5948, 6014]]]] assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/gpu/test_batchnorm_op.py b/tests/st/ops/gpu/test_batchnorm_op.py index 4ca9ee8dd3..a7ff153016 100644 --- a/tests/st/ops/gpu/test_batchnorm_op.py +++ b/tests/st/ops/gpu/test_batchnorm_op.py @@ -96,6 +96,7 @@ def test_train_forward(): bn_net.set_train(False) output = bn_net(Tensor(x)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_broadcast_op.py b/tests/st/ops/gpu/test_broadcast_op.py index 66345599e6..b124543fe7 100644 --- a/tests/st/ops/gpu/test_broadcast_op.py +++ b/tests/st/ops/gpu/test_broadcast_op.py @@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype import mindspore.context as context import numpy as np + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -63,7 +64,6 @@ def test_nobroadcast(): assert np.allclose(output_ms.asnumpy(), output_np) - @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -105,6 +105,7 @@ def test_broadcast(): output_np = x1_np - x2_np assert np.allclose(output_ms.asnumpy(), output_np) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -144,4 +145,4 @@ def test_broadcast_diff_dims(): output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np)) output_np = x1_np - x2_np - assert np.allclose(output_ms.asnumpy(), output_np) \ No newline at end of file + assert np.allclose(output_ms.asnumpy(), output_np) diff --git a/tests/st/ops/gpu/test_concatv2_op.py b/tests/st/ops/gpu/test_concatv2_op.py index 02e2258ffd..0c461fe49f 100644 --- a/tests/st/ops/gpu/test_concatv2_op.py +++ b/tests/st/ops/gpu/test_concatv2_op.py @@ -114,6 +114,7 @@ def test_axis21(): assert (output.asnumpy() == expect).all() print(output) + class Concat3INet(nn.Cell): def __init__(self): super(Concat3INet, self).__init__() @@ -122,6 +123,7 @@ class Concat3INet(nn.Cell): def construct(self, x1, x2, x3): return self.cat((x1, x2, x3)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -137,7 +139,7 @@ def test_concat_3i(): x2_ms = Tensor(x2_np) x3_ms = Tensor(x3_np) output_ms = cat(x1_ms, x2_ms, x3_ms) - + error = np.ones(shape=output_np.shape) * 10e-6 diff = output_ms.asnumpy() - output_np assert np.all(diff < error) @@ -151,6 +153,7 @@ class Concat4INet(nn.Cell): def construct(self, x1, x2, x3, x4): return self.cat((x1, x2, x3, x4)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -168,7 +171,7 @@ def test_concat_4i(): x3_ms = Tensor(x3_np) x4_ms = Tensor(x4_np) output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms) - + error = np.ones(shape=output_np.shape) * 10e-6 diff = output_ms.asnumpy() - output_np assert np.all(diff < error) diff --git 
a/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py index 0f66f2fac5..aa9abb2b90 100644 --- a/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py +++ b/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py @@ -45,6 +45,7 @@ class Conv2dFilter(nn.Cell): def construct(self, out, x, w): return self.conv_filter(out, x, self.get_shape(w)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_correction_mul_grad_op.py b/tests/st/ops/gpu/test_correction_mul_grad_op.py index 88b391a77a..ab434447ef 100644 --- a/tests/st/ops/gpu/test_correction_mul_grad_op.py +++ b/tests/st/ops/gpu/test_correction_mul_grad_op.py @@ -22,7 +22,6 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import mindspore.context as context - context.set_context(device_target='GPU') diff --git a/tests/st/ops/gpu/test_equalcount_op.py b/tests/st/ops/gpu/test_equalcount_op.py index a48182d506..7cd55de446 100644 --- a/tests/st/ops/gpu/test_equalcount_op.py +++ b/tests/st/ops/gpu/test_equalcount_op.py @@ -20,6 +20,7 @@ from mindspore.ops import operations as P import mindspore.nn as nn import mindspore.context as context + class NetEqualCount(nn.Cell): def __init__(self): super(NetEqualCount, self).__init__() @@ -28,6 +29,7 @@ class NetEqualCount(nn.Cell): def construct(self, x, y): return self.equalcount(x, y) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_exp_op.py b/tests/st/ops/gpu/test_exp_op.py index 9397839793..ddab797545 100644 --- a/tests/st/ops/gpu/test_exp_op.py +++ b/tests/st/ops/gpu/test_exp_op.py @@ -29,6 +29,7 @@ class NetExp(nn.Cell): def construct(self, x): return self.exp(x) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_float_status_op.py b/tests/st/ops/gpu/test_float_status_op.py index 09fc90feaa..2b474187e9 100644 --- a/tests/st/ops/gpu/test_float_status_op.py +++ b/tests/st/ops/gpu/test_float_status_op.py @@ -20,6 +20,7 @@ import mindspore.nn as nn import numpy as np import mindspore.context as context + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,6 +29,7 @@ class Net(nn.Cell): def construct(self, x): return self.status(x) + class Netnan(nn.Cell): def __init__(self): super(Netnan, self).__init__() @@ -36,6 +38,7 @@ class Netnan(nn.Cell): def construct(self, x): return self.isnan(x) + class Netinf(nn.Cell): def __init__(self): super(Netinf, self).__init__() @@ -44,6 +47,7 @@ class Netinf(nn.Cell): def construct(self, x): return self.isinf(x) + class Netfinite(nn.Cell): def __init__(self): super(Netfinite, self).__init__() @@ -51,12 +55,14 @@ class Netfinite(nn.Cell): def construct(self, x): return self.isfinite(x) - + + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") x1 = np.array([[1.2, 2, np.nan, 88]]).astype(np.float32) x2 = np.array([[np.inf, 1, 88.0, 0]]).astype(np.float32) x3 = np.array([[1, 2], [3, 4], [5.0, 88.0]]).astype(np.float32) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -72,6 +78,7 @@ def test_status(): assert output2.asnumpy()[0] == expect2 assert output3.asnumpy()[0] == expect3 + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -87,6 +94,7 @@ def test_nan(): assert (output2.asnumpy() == expect2).all() assert (output3.asnumpy() == expect3).all() + @pytest.mark.level0 
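[Editor's note, not part of the patch: the IsNan/IsInf/IsFinite expectations in
test_float_status_op.py above mirror NumPy's elementwise predicates. A
standalone sketch using the same x1 this file defines; outputs shown in the
comments:

    import numpy as np

    x1 = np.array([[1.2, 2, np.nan, 88]]).astype(np.float32)
    print(np.isnan(x1))     # [[False False  True False]]
    print(np.isinf(x1))     # [[False False False False]]
    print(np.isfinite(x1))  # [[ True  True False  True]]
]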
@pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -102,6 +110,7 @@ def test_inf(): assert (output2.asnumpy() == expect2).all() assert (output3.asnumpy() == expect3).all() + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_gelu_grad_op.py b/tests/st/ops/gpu/test_gelu_grad_op.py index 5891868d23..9e04c79366 100644 --- a/tests/st/ops/gpu/test_gelu_grad_op.py +++ b/tests/st/ops/gpu/test_gelu_grad_op.py @@ -23,6 +23,7 @@ from mindspore.ops import composite as C context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + class GeluNet(nn.Cell): def __init__(self): super(GeluNet, self).__init__() @@ -31,6 +32,7 @@ class GeluNet(nn.Cell): def construct(self, x): return self.gelu(x) + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -46,16 +48,16 @@ class Grad(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_gelugrad(): - x_ms = Tensor(np.array([0.58401114, 0.68800163, 0.9760397, 0.14702141, 0.46563736, 0.9607501, + x_ms = Tensor(np.array([0.58401114, 0.68800163, 0.9760397, 0.14702141, 0.46563736, 0.9607501, 0.14567593, 0.12261796, 0.37054458, 0.46421242]).astype(np.float32)) - dy_ms = Tensor(np.array([0.5559598, 0.96994054, 0.24770357, 0.34646875, 0.2984393, 0.03287048, - 0.55681044, 0.966908, 0.06015943, 0.6099489 ]).astype(np.float32)) + dy_ms = Tensor(np.array([0.5559598, 0.96994054, 0.24770357, 0.34646875, 0.2984393, 0.03287048, + 0.55681044, 0.966908, 0.06015943, 0.6099489]).astype(np.float32)) net = GeluNet() grad = Grad(net) output = grad(x_ms, dy_ms) print(output) - expect = [0.50963277, 0.9414753, 0.2667653, 0.21358444, 0.25243032, 0.0352667, + expect = [0.50963277, 0.9414753, 0.2667653, 0.21358444, 0.25243032, 0.0352667, 0.34266686, 0.57757664, 0.04707306, 0.51536125] - assert np.allclose(output[0].asnumpy(), expect) \ No newline at end of file + assert np.allclose(output[0].asnumpy(), expect) diff --git a/tests/st/ops/gpu/test_gelu_op.py b/tests/st/ops/gpu/test_gelu_op.py index 9238bbc71c..612df1530a 100644 --- a/tests/st/ops/gpu/test_gelu_op.py +++ b/tests/st/ops/gpu/test_gelu_op.py @@ -22,6 +22,7 @@ import mindspore.context as context context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + class GeluNet(nn.Cell): def __init__(self): super(GeluNet, self).__init__() @@ -34,6 +35,7 @@ class GeluNet(nn.Cell): def GeluCompute(x): return 0.5 * x * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x * x * x))) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -61,6 +63,7 @@ def test_gelu_2d(): assert np.allclose(y_np, y_ms.asnumpy()) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -74,6 +77,7 @@ def test_gelu_4d(): assert np.allclose(y_np, y_ms.asnumpy()) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_layer_norm_grad_op.py b/tests/st/ops/gpu/test_layer_norm_grad_op.py index 8b6bf6018e..f4cd907990 100644 --- a/tests/st/ops/gpu/test_layer_norm_grad_op.py +++ b/tests/st/ops/gpu/test_layer_norm_grad_op.py @@ -22,9 +22,9 @@ from mindspore.ops import composite as C import mindspore.nn as nn import mindspore.context as context - context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + class LayerNormGradNet(nn.Cell): def __init__(self, begin_norm_axis, begin_params_axis): super(LayerNormGradNet, self).__init__() @@ -33,9 +33,10 @@ class LayerNormGradNet(nn.Cell): 
def construct(self, dy, x, var, mean, gamma): return self.norm(dy, x, var, mean, gamma) + def LayerNormGradReference(x, dy, gamma, epsilon, begin_norm_axis, begin_params_axis): - begin_norm_axis = begin_norm_axis if begin_norm_axis >=0 else begin_norm_axis + len(x.shape) - begin_params_axis = begin_params_axis if begin_params_axis >=0 else begin_params_axis + len(x.shape) + begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape) + begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape) norm_axis = [i for i in range(begin_norm_axis, len(x.shape))] param_axis = [i for i in range(0, begin_params_axis)] @@ -46,11 +47,12 @@ def LayerNormGradReference(x, dy, gamma, epsilon, begin_norm_axis, begin_params_ mean = np.mean(x, axis=tuple(norm_axis), keepdims=True) var = np.var(x, axis=tuple(norm_axis), keepdims=True) - gamma = gamma.reshape((*((1,)*begin_params_axis), *x.shape[begin_params_axis:])) + gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:])) dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean), axis=tuple(param_axis), keepdims=True) db = np.sum(dy, axis=tuple(param_axis), keepdims=True) - sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis), keepdims=True) + sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis), + keepdims=True) sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True) sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True) @@ -71,7 +73,8 @@ def test_layernormgrad0(): dy_np = np.random.randn(4096, 3072).astype(np.float32) gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32) epsilon = 10e-12 - dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, begin_params_axis) + dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, + begin_params_axis) dy_ms = Tensor(dy_np) x_ms = Tensor(x_np) @@ -87,7 +90,6 @@ def test_layernormgrad0(): assert np.allclose(db_ms.asnumpy(), db_np, rtol=1e-6, atol=1e-3) - @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -98,7 +100,8 @@ def test_layernormgrad1(): dy_np = np.random.randn(640, 768).astype(np.float32) gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32) epsilon = 10e-12 - dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, begin_params_axis) + dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, + begin_params_axis) dy_ms = Tensor(dy_np) x_ms = Tensor(x_np) @@ -124,7 +127,8 @@ def test_layernormgrad2(): dy_np = np.random.randn(32, 128, 768).astype(np.float32) gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32) epsilon = 10e-12 - dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, begin_params_axis) + dx_np, dg_np, db_np, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis, + begin_params_axis) dy_ms = Tensor(dy_np) x_ms = Tensor(x_np) diff --git a/tests/st/ops/gpu/test_layer_norm_op.py b/tests/st/ops/gpu/test_layer_norm_op.py index a281cd0f5f..3a39577f3c 100644 --- a/tests/st/ops/gpu/test_layer_norm_op.py +++ b/tests/st/ops/gpu/test_layer_norm_op.py @@ -31,16 +31,17 @@ 
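[Editor's note, not part of the patch: the LayerNormReference helper below
normalizes over the axes from begin_norm_axis onward. A forward-only sketch of
the same computation for the common 2D case with begin_norm_axis=1; the name
layer_norm_ref is hypothetical:

    import numpy as np

    def layer_norm_ref(x, gamma, beta, eps=1e-12):
        # per-row mean/variance over the trailing axis, then scale and shift
        mean = x.mean(axis=-1, keepdims=True)
        var = x.var(axis=-1, keepdims=True)
        return (x - mean) / np.sqrt(var + eps) * gamma + beta

    x = np.random.randn(4, 8).astype(np.float32)
    y = layer_norm_ref(x, np.ones(8, np.float32), np.zeros(8, np.float32))

The 1e-12 eps matches the constant the tests' reference implementation adds
under the square root.]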
class LayerNormNet(nn.Cell): def construct(self, x, gamma, beta): return self.norm(x, gamma, beta) + def LayerNormReference(begin_norm_axis, begin_params_axis, x, gamma, beta): - begin_norm_axis = begin_norm_axis if begin_norm_axis >=0 else begin_norm_axis + len(x.shape) - begin_params_axis = begin_params_axis if begin_params_axis >=0 else begin_params_axis + len(x.shape) + begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape) + begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape) axis = [i for i in range(begin_norm_axis, len(x.shape))] mean = np.mean(x, axis=tuple(axis), keepdims=True) - var = np.var(x, axis=tuple(axis), keepdims=True) + var = np.var(x, axis=tuple(axis), keepdims=True) - gamma = gamma.reshape((*((1,)*begin_params_axis), *x.shape[begin_params_axis:])) - beta = beta.reshape((*((1,)*begin_params_axis), *x.shape[begin_params_axis:])) + gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:])) + beta = beta.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:])) y = np.subtract(x, mean) / np.sqrt(var + 1e-12) * gamma + beta return y, mean, var @@ -84,7 +85,6 @@ def test_layernorm1(): net = LayerNormNet(begin_norm_axis, begin_params_axis) y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms) - assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-6) assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-6) assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-6) diff --git a/tests/st/ops/gpu/test_lessequal_op.py b/tests/st/ops/gpu/test_lessequal_op.py index 08bb28b0af..09b04815f0 100644 --- a/tests/st/ops/gpu/test_lessequal_op.py +++ b/tests/st/ops/gpu/test_lessequal_op.py @@ -46,4 +46,3 @@ def test_lessequal(): lessequal = Net() output = lessequal(x, y) assert np.all(output.asnumpy() == expect) - diff --git a/tests/st/ops/gpu/test_logical_op.py b/tests/st/ops/gpu/test_logical_op.py index ab95aa8f3f..4af58ce43e 100644 --- a/tests/st/ops/gpu/test_logical_op.py +++ b/tests/st/ops/gpu/test_logical_op.py @@ -29,6 +29,7 @@ class NetAnd(Cell): def construct(self, x, y): return self.logicaland(x, y) + class NetOr(Cell): def __init__(self): super(NetOr, self).__init__() @@ -37,6 +38,7 @@ class NetOr(Cell): def construct(self, x, y): return self.logicalor(x, y) + class NetNot(Cell): def __init__(self): super(NetNot, self).__init__() @@ -45,9 +47,11 @@ class NetNot(Cell): def construct(self, x): return self.logicalnot(x) + x = np.array([True, False, False]).astype(np.bool) y = np.array([False]).astype(np.bool) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -62,6 +66,7 @@ def test_logicaland(): output = logicaland(Tensor(x), Tensor(y)) assert np.all(output.asnumpy() == np.logical_and(x, y)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -76,6 +81,7 @@ def test_logicalor(): output = logicalor(Tensor(x), Tensor(y)) assert np.all(output.asnumpy() == np.logical_or(x, y)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -89,4 +95,3 @@ def test_logicalnot(): logicalnot = NetNot() output = logicalnot(Tensor(x)) assert np.all(output.asnumpy() == np.logical_not(x)) - diff --git a/tests/st/ops/gpu/test_logsoftmax_op.py b/tests/st/ops/gpu/test_logsoftmax_op.py index f87a7d7557..ee54f76354 100644 --- a/tests/st/ops/gpu/test_logsoftmax_op.py +++ b/tests/st/ops/gpu/test_logsoftmax_op.py @@ -21,16 +21,17 @@ from mindspore.ops 
import composite as C import mindspore.nn as nn import mindspore.context as context + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_logsoftmax(): - x = np.array([[-0.08082921, -0.13706027, -0.4711177, -0.05606057], - [-0.46082982, 1.1761844, -1.016654, -1.743829 ], - [-1.5062045, 0.6910976, 0.4839723, 1.1502692 ]]).astype(np.float32) - expect = np.array([[-1.2939762, -1.3502073, -1.6842647, -1.2692076 ], - [-1.9445671, -0.3075528, -2.5003912, -3.2275662 ], - [-3.452001, -1.2546989, -1.4618242, -0.79552734]]).astype(np.float32) + x = np.array([[-0.08082921, -0.13706027, -0.4711177, -0.05606057], + [-0.46082982, 1.1761844, -1.016654, -1.743829], + [-1.5062045, 0.6910976, 0.4839723, 1.1502692]]).astype(np.float32) + expect = np.array([[-1.2939762, -1.3502073, -1.6842647, -1.2692076], + [-1.9445671, -0.3075528, -2.5003912, -3.2275662], + [-3.452001, -1.2546989, -1.4618242, -0.79552734]]).astype(np.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") LogSoftmax = P.LogSoftmax() @@ -46,6 +47,7 @@ class LogSoftmax(nn.Cell): def construct(self, x): return self.logsoftmax(x) + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -61,21 +63,36 @@ class Grad(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_logsoftmaxgrad(): - x = np.array([[-0.47705367, 0.48267725, -1.0453935, 1.574488, 0.20362134, 0.4435456, -0.23984082, -0.43684655, -0.7725506, 1.4481013 ], - [ 1.1012247, 1.7069651, 0.55062026, 0.3361901, -1.1082426, -0.5001939, -0.3255393, -0.7972024, -0.27965206, -0.702805 ], - [ 0.19450496, 0.87596166, 0.6467245, -1.044987, 0.5248943, -2.6166635, 1.6719198, 0.06600758, -0.4099178, 1.1861311 ], - [ 1.1305193, -1.97308, 2.1047623, -1.5105937, 0.93052036, 1.2467804, 0.5310002, 0.7084912, -1.3681422, -0.9686862 ], - [ 1.871408, 0.14219497, -0.41050452, -0.749807, 1.4900619, -1.8172716, -0.73839617, 0.17565694, -0.4553867, -1.5423119 ]]).astype(np.float32) - dy = np.array([[ 1.516363, -0.15196544, 0.598733, 0.64357865, 0.16265012, -1.3521105, 0.22621834, 0.7168259, -0.6709239, 0.79757756], - [-0.32457778, 1.2831115, 1.1211495, -0.02665559, 1.9170904, -1.3397789, 1.4124829, -1.4298155, 0.758519, -0.25322974], - [-0.24226122, -1.2555921, 0.6492511, -0.34847677, 0.19916506, 0.628554, -0.19658111, 0.44939864, -0.11677749, -1.2131723 ], - [ 0.24267715, 0.28106326, 1.1075432, -0.29006946, 0.31335673, 0.8833154, 0.13152207, 1.5482179, 0.29770762, -0.16246222], - [ 0.02145994, 0.80424, -0.95061, 1.5875458, -0.00308682, 0.17964548, 0.49912593, 0.46977136, 0.2151897, 0.30908248]]).astype(np.float32) - expect = np.array([[ 1.4219905 , -0.39837134, 0.5452743 , -0.09062839, -0.02375537, -1.5890603 , 0.10658137, 0.6185817 , -0.7411523 , 0.15054005], - [-0.94926417, 0.13830578, 0.7609547 , -0.31733334, 1.8485254 , -1.4657221 , 1.2625053 , -1.523396 , 0.601499 , -0.35607445], - [-0.14447737, -1.0622973 , 0.80294746, -0.32016528, 0.33523226, 0.63443416, 0.23186903, 0.53539133, -0.0633494 , -0.9495847 ], - [-0.36894822, 0.253609 , -0.5127511 , -0.33366728, -0.18740037, 0.19628316, -0.20430653, 1.1471655 , 0.24743511, -0.23741922], - [-1.2582518 , 0.57718843, -1.0812542 , 1.4944922 , -0.8770549 , 0.1476463 , 0.40500447, 0.23499368, 0.09027944, 0.26695627]]).astype(np.float32) + x = np.array([[-0.47705367, 0.48267725, -1.0453935, 1.574488, 0.20362134, 0.4435456, -0.23984082, -0.43684655, + -0.7725506, 1.4481013], + [1.1012247, 1.7069651, 0.55062026, 0.3361901, -1.1082426, -0.5001939, 
-0.3255393, -0.7972024, + -0.27965206, -0.702805], + [0.19450496, 0.87596166, 0.6467245, -1.044987, 0.5248943, -2.6166635, 1.6719198, 0.06600758, + -0.4099178, 1.1861311], + [1.1305193, -1.97308, 2.1047623, -1.5105937, 0.93052036, 1.2467804, 0.5310002, 0.7084912, -1.3681422, + -0.9686862], + [1.871408, 0.14219497, -0.41050452, -0.749807, 1.4900619, -1.8172716, -0.73839617, 0.17565694, + -0.4553867, -1.5423119]]).astype(np.float32) + dy = np.array([[1.516363, -0.15196544, 0.598733, 0.64357865, 0.16265012, -1.3521105, 0.22621834, 0.7168259, + -0.6709239, 0.79757756], + [-0.32457778, 1.2831115, 1.1211495, -0.02665559, 1.9170904, -1.3397789, 1.4124829, -1.4298155, + 0.758519, -0.25322974], + [-0.24226122, -1.2555921, 0.6492511, -0.34847677, 0.19916506, 0.628554, -0.19658111, 0.44939864, + -0.11677749, -1.2131723], + [0.24267715, 0.28106326, 1.1075432, -0.29006946, 0.31335673, 0.8833154, 0.13152207, 1.5482179, + 0.29770762, -0.16246222], + [0.02145994, 0.80424, -0.95061, 1.5875458, -0.00308682, 0.17964548, 0.49912593, 0.46977136, + 0.2151897, 0.30908248]]).astype(np.float32) + expect = np.array([[1.4219905, -0.39837134, 0.5452743, -0.09062839, -0.02375537, -1.5890603, 0.10658137, 0.6185817, + -0.7411523, 0.15054005], + [-0.94926417, 0.13830578, 0.7609547, -0.31733334, 1.8485254, -1.4657221, 1.2625053, -1.523396, + 0.601499, -0.35607445], + [-0.14447737, -1.0622973, 0.80294746, -0.32016528, 0.33523226, 0.63443416, 0.23186903, + 0.53539133, -0.0633494, -0.9495847], + [-0.36894822, 0.253609, -0.5127511, -0.33366728, -0.18740037, 0.19628316, -0.20430653, 1.1471655, + 0.24743511, -0.23741922], + [-1.2582518, 0.57718843, -1.0812542, 1.4944922, -0.8770549, 0.1476463, 0.40500447, 0.23499368, + 0.09027944, 0.26695627]]).astype(np.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = LogSoftmax() @@ -87,21 +104,36 @@ def test_logsoftmaxgrad(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_logsoftmaxgrad1(): - x = np.array([[-0.47705367, 0.48267725, -1.0453935, 1.574488, 0.20362134, 0.4435456, -0.23984082, -0.43684655, -0.7725506, 1.4481013 ], - [ 1.1012247, 1.7069651, 0.55062026, 0.3361901, -1.1082426, -0.5001939, -0.3255393, -0.7972024, -0.27965206, -0.702805 ], - [ 0.19450496, 0.87596166, 0.6467245, -1.044987, 0.5248943, -2.6166635, 1.6719198, 0.06600758, -0.4099178, 1.1861311 ], - [ 1.1305193, -1.97308, 2.1047623, -1.5105937, 0.93052036, 1.2467804, 0.5310002, 0.7084912, -1.3681422, -0.9686862 ], - [ 1.871408, 0.14219497, -0.41050452, -0.749807, 1.4900619, -1.8172716, -0.73839617, 0.17565694, -0.4553867, -1.5423119 ]]).astype(np.float32) - dy = np.array([[ 1.516363, -0.15196544, 0.598733, 0.64357865, 0.16265012, -1.3521105, 0.22621834, 0.7168259, -0.6709239, 0.79757756], - [-0.32457778, 1.2831115, 1.1211495, -0.02665559, 1.9170904, -1.3397789, 1.4124829, -1.4298155, 0.758519, -0.25322974], - [-0.24226122, -1.2555921, 0.6492511, -0.34847677, 0.19916506, 0.628554, -0.19658111, 0.44939864, -0.11677749, -1.2131723 ], - [ 0.24267715, 0.28106326, 1.1075432, -0.29006946, 0.31335673, 0.8833154, 0.13152207, 1.5482179, 0.29770762, -0.16246222], - [ 0.02145994, 0.80424, -0.95061, 1.5875458, -0.00308682, 0.17964548, 0.49912593, 0.46977136, 0.2151897, 0.30908248]]).astype(np.float32) - expect = np.array([[ 1.464194 , -0.29578894, 0.5296974 , -0.39600563, -0.1479242 , -1.0869746 , 0.04521982, 0.5064515 , -0.7515615 , 1.0554069 ], - [-0.5774203 , 0.793861 , 0.7805745 , -0.32800734, 1.8334473 , -1.236596 , 1.2463496 , -1.5765365 , 0.6265108 , -0.22322391], - 
[-0.34437084, -1.4687154 , 0.27432096, -0.42420125, -0.22908019, 0.640983 , -1.4210342 , 0.10155854, -0.23266247, -1.0147638 ], - [-0.01768187, 0.26872346, -0.5037259 , -0.3376058 , -0.3291146 , 1.4752979 , -0.25972134, 0.8869053 , 0.25325722, -0.13946185], - [-0.5247209 , 0.70192003, -1.0808672 , 1.4858199 , -1.1273282 , 0.20728993, 0.38918605, 0.08162117, 0.10445589, 0.3220427 ]],).astype(np.float32) + x = np.array([[-0.47705367, 0.48267725, -1.0453935, 1.574488, 0.20362134, 0.4435456, -0.23984082, -0.43684655, + -0.7725506, 1.4481013], + [1.1012247, 1.7069651, 0.55062026, 0.3361901, -1.1082426, -0.5001939, -0.3255393, -0.7972024, + -0.27965206, -0.702805], + [0.19450496, 0.87596166, 0.6467245, -1.044987, 0.5248943, -2.6166635, 1.6719198, 0.06600758, + -0.4099178, 1.1861311], + [1.1305193, -1.97308, 2.1047623, -1.5105937, 0.93052036, 1.2467804, 0.5310002, 0.7084912, -1.3681422, + -0.9686862], + [1.871408, 0.14219497, -0.41050452, -0.749807, 1.4900619, -1.8172716, -0.73839617, 0.17565694, + -0.4553867, -1.5423119]]).astype(np.float32) + dy = np.array([[1.516363, -0.15196544, 0.598733, 0.64357865, 0.16265012, -1.3521105, 0.22621834, 0.7168259, + -0.6709239, 0.79757756], + [-0.32457778, 1.2831115, 1.1211495, -0.02665559, 1.9170904, -1.3397789, 1.4124829, -1.4298155, + 0.758519, -0.25322974], + [-0.24226122, -1.2555921, 0.6492511, -0.34847677, 0.19916506, 0.628554, -0.19658111, 0.44939864, + -0.11677749, -1.2131723], + [0.24267715, 0.28106326, 1.1075432, -0.29006946, 0.31335673, 0.8833154, 0.13152207, 1.5482179, + 0.29770762, -0.16246222], + [0.02145994, 0.80424, -0.95061, 1.5875458, -0.00308682, 0.17964548, 0.49912593, 0.46977136, + 0.2151897, 0.30908248]]).astype(np.float32) + expect = np.array([[1.464194, -0.29578894, 0.5296974, -0.39600563, -0.1479242, -1.0869746, 0.04521982, 0.5064515, + -0.7515615, 1.0554069], + [-0.5774203, 0.793861, 0.7805745, -0.32800734, 1.8334473, -1.236596, 1.2463496, -1.5765365, + 0.6265108, -0.22322391], + [-0.34437084, -1.4687154, 0.27432096, -0.42420125, -0.22908019, 0.640983, -1.4210342, 0.10155854, + -0.23266247, -1.0147638], + [-0.01768187, 0.26872346, -0.5037259, -0.3376058, -0.3291146, 1.4752979, -0.25972134, 0.8869053, + 0.25325722, -0.13946185], + [-0.5247209, 0.70192003, -1.0808672, 1.4858199, -1.1273282, 0.20728993, 0.38918605, 0.08162117, + 0.10445589, 0.3220427]], ).astype(np.float32) context.set_context(mode=context.GRAPH_MODE, device_target="GPU") net = LogSoftmax(0) diff --git a/tests/st/ops/gpu/test_maximum_op.py b/tests/st/ops/gpu/test_maximum_op.py index edfb93e926..7f6c03d718 100644 --- a/tests/st/ops/gpu/test_maximum_op.py +++ b/tests/st/ops/gpu/test_maximum_op.py @@ -30,6 +30,7 @@ class Net(Cell): def construct(self, x, y): return self.max(x, y) + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -40,6 +41,7 @@ class Grad(Cell): gout = self.grad(self.network)(x1, x2, sens) return gout + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard @@ -70,7 +72,7 @@ def test_maximum(): def test_broadcast(): context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') - x1_np = np.array([[[[0.659578 ], + x1_np = np.array([[[[0.659578], [0.49113268], [0.75909054], [0.71681815], @@ -80,141 +82,140 @@ def test_broadcast(): [0.06398096], [0.09519596], [0.12498625]]], - [[[0.7347768 ], - [0.166469 ], - [0.328553 ], + [[[0.7347768], + [0.166469], + [0.328553], [0.54908437], [0.23673844]]]]).astype(np.float32) - x2_np = np.array([[[[0.9154968, 0.29014662, 0.6492294, 
0.39918253, 0.1648203, 0.00861965]], - [[0.996885, 0.24152198, 0.3601213, 0.51664376, 0.7933056, 0.84706444]], - [[0.75606346, 0.974512, 0.3939527, 0.69697475, 0.83400667, 0.6348955 ]], - [[0.68492866, 0.24609096, 0.4924665, 0.22500521, 0.38474053, 0.5586104 ]]]]).astype(np.float32) - dy_np = np.array([[[[0.42891738, 0.03434946, 0.06192983, 0.21216309, 0.37450036, 0.6619524 ], - [0.8583447, 0.5765161, 0.1468952, 0.9975385, 0.6908136, 0.4903796 ], - [0.68952006, 0.39336833, 0.9049695, 0.66886294, 0.2338471, 0.913618 ], - [0.0428149, 0.6243054, 0.8519898, 0.12088962, 0.9735885, 0.45661286], - [0.41563734, 0.41607043, 0.4754915, 0.32207987, 0.33823156, 0.47422352]], - - [[0.64478457, 0.22430937, 0.7682554, 0.46082005, 0.8938723, 0.20490853], - [0.44393885, 0.08278944, 0.4734108, 0.5543551, 0.39428464, 0.44424313], + x2_np = np.array([[[[0.9154968, 0.29014662, 0.6492294, 0.39918253, 0.1648203, 0.00861965]], + [[0.996885, 0.24152198, 0.3601213, 0.51664376, 0.7933056, 0.84706444]], + [[0.75606346, 0.974512, 0.3939527, 0.69697475, 0.83400667, 0.6348955]], + [[0.68492866, 0.24609096, 0.4924665, 0.22500521, 0.38474053, 0.5586104]]]]).astype(np.float32) + dy_np = np.array([[[[0.42891738, 0.03434946, 0.06192983, 0.21216309, 0.37450036, 0.6619524], + [0.8583447, 0.5765161, 0.1468952, 0.9975385, 0.6908136, 0.4903796], + [0.68952006, 0.39336833, 0.9049695, 0.66886294, 0.2338471, 0.913618], + [0.0428149, 0.6243054, 0.8519898, 0.12088962, 0.9735885, 0.45661286], + [0.41563734, 0.41607043, 0.4754915, 0.32207987, 0.33823156, 0.47422352]], + + [[0.64478457, 0.22430937, 0.7682554, 0.46082005, 0.8938723, 0.20490853], + [0.44393885, 0.08278944, 0.4734108, 0.5543551, 0.39428464, 0.44424313], [0.12612297, 0.76566416, 0.71133816, 0.81280327, 0.20583127, 0.54058075], - [0.41341263, 0.48118508, 0.00401995, 0.37259838, 0.05435474, 0.5240658 ], - [0.4081956, 0.48718935, 0.9132831, 0.67969185, 0.0119757, 0.8328054 ]], - - [[0.91695577, 0.95370644, 0.263782, 0.7477626, 0.6448147, 0.8080634 ], - [0.15576603, 0.9104615, 0.3778708, 0.6912833, 0.2092224, 0.67462957], - [0.7087075, 0.7888326, 0.4672294, 0.98221505, 0.25210258, 0.98920417], - [0.7466197, 0.22702982, 0.01991269, 0.6846591, 0.7515228, 0.5890395 ], - [0.04531088, 0.21740614, 0.8406235, 0.36480767, 0.37733936, 0.02914464]], - - [[0.33069974, 0.5497569, 0.9896345, 0.4167176, 0.78057563, 0.04659131], - [0.7747768, 0.21427679, 0.29893255, 0.7706969, 0.9755185, 0.42388415], - [0.3910244, 0.39381978, 0.37065396, 0.15558061, 0.05012341, 0.15870963], - [0.17791101, 0.47219893, 0.13899496, 0.32323205, 0.3628809, 0.02580585], - [0.30274773, 0.62890774, 0.11024303, 0.6980051, 0.35346958, 0.062852 ]]], - - - [[[0.6925081, 0.74668753, 0.80145043, 0.06598313, 0.665123, 0.15073007], - [0.11784806, 0.6385372, 0.5228278, 0.5349848, 0.84671104, 0.8096436 ], - [0.09516156, 0.63298017, 0.52382874, 0.36734378, 0.66497755, 0.6019127 ], - [0.46438488, 0.0194377, 0.9388292, 0.7286089, 0.29178405, 0.11872514], - [0.22101837, 0.6164887, 0.6139798, 0.11711904, 0.6227745, 0.09701069]], - - [[0.80480653, 0.90034056, 0.8633447, 0.97415197, 0.08309154, 0.8446033 ], - [0.9473769, 0.791024, 0.26339203, 0.01155075, 0.2673186, 0.7116369 ], - [0.9687511, 0.24281934, 0.37777108, 0.09802654, 0.2421312, 0.87095344], - [0.6311381, 0.23368953, 0.0998995, 0.4364419, 0.9187446, 0.5043872 ], - [0.35226053, 0.09357589, 0.41317305, 0.85930043, 0.16249318, 0.5478765 ]], - - [[0.14338651, 0.24859418, 0.4246941, 0.73034066, 0.47172204, 0.8717199 ], - [0.05415315, 0.78556925, 0.99214983, 0.7415298, 0.673708, 
0.87817156], - [0.616975, 0.42843062, 0.05179814, 0.1566958, 0.04536059, 0.70166487], - [0.15493333, 0.776598, 0.4361967, 0.40253627, 0.89210516, 0.8144414 ], - [0.04816005, 0.29696834, 0.4586605, 0.3419852, 0.5595613, 0.74093205]], - - [[0.1388035, 0.9168704, 0.64287645, 0.83864623, 0.48026922, 0.78323376], + [0.41341263, 0.48118508, 0.00401995, 0.37259838, 0.05435474, 0.5240658], + [0.4081956, 0.48718935, 0.9132831, 0.67969185, 0.0119757, 0.8328054]], + + [[0.91695577, 0.95370644, 0.263782, 0.7477626, 0.6448147, 0.8080634], + [0.15576603, 0.9104615, 0.3778708, 0.6912833, 0.2092224, 0.67462957], + [0.7087075, 0.7888326, 0.4672294, 0.98221505, 0.25210258, 0.98920417], + [0.7466197, 0.22702982, 0.01991269, 0.6846591, 0.7515228, 0.5890395], + [0.04531088, 0.21740614, 0.8406235, 0.36480767, 0.37733936, 0.02914464]], + + [[0.33069974, 0.5497569, 0.9896345, 0.4167176, 0.78057563, 0.04659131], + [0.7747768, 0.21427679, 0.29893255, 0.7706969, 0.9755185, 0.42388415], + [0.3910244, 0.39381978, 0.37065396, 0.15558061, 0.05012341, 0.15870963], + [0.17791101, 0.47219893, 0.13899496, 0.32323205, 0.3628809, 0.02580585], + [0.30274773, 0.62890774, 0.11024303, 0.6980051, 0.35346958, 0.062852]]], + + [[[0.6925081, 0.74668753, 0.80145043, 0.06598313, 0.665123, 0.15073007], + [0.11784806, 0.6385372, 0.5228278, 0.5349848, 0.84671104, 0.8096436], + [0.09516156, 0.63298017, 0.52382874, 0.36734378, 0.66497755, 0.6019127], + [0.46438488, 0.0194377, 0.9388292, 0.7286089, 0.29178405, 0.11872514], + [0.22101837, 0.6164887, 0.6139798, 0.11711904, 0.6227745, 0.09701069]], + + [[0.80480653, 0.90034056, 0.8633447, 0.97415197, 0.08309154, 0.8446033], + [0.9473769, 0.791024, 0.26339203, 0.01155075, 0.2673186, 0.7116369], + [0.9687511, 0.24281934, 0.37777108, 0.09802654, 0.2421312, 0.87095344], + [0.6311381, 0.23368953, 0.0998995, 0.4364419, 0.9187446, 0.5043872], + [0.35226053, 0.09357589, 0.41317305, 0.85930043, 0.16249318, 0.5478765]], + + [[0.14338651, 0.24859418, 0.4246941, 0.73034066, 0.47172204, 0.8717199], + [0.05415315, 0.78556925, 0.99214983, 0.7415298, 0.673708, 0.87817156], + [0.616975, 0.42843062, 0.05179814, 0.1566958, 0.04536059, 0.70166487], + [0.15493333, 0.776598, 0.4361967, 0.40253627, 0.89210516, 0.8144414], + [0.04816005, 0.29696834, 0.4586605, 0.3419852, 0.5595613, 0.74093205]], + + [[0.1388035, 0.9168704, 0.64287645, 0.83864623, 0.48026922, 0.78323376], [0.12724937, 0.83034366, 0.42557436, 0.50578654, 0.25630295, 0.15349793], - [0.27256685, 0.04547984, 0.5385756, 0.39270344, 0.7661698, 0.23722854], - [0.24620503, 0.25431684, 0.71564585, 0.01161419, 0.846467, 0.7043044 ], - [0.63272387, 0.11857849, 0.3772076, 0.16758402, 0.46743023, 0.05919575]]], + [0.27256685, 0.04547984, 0.5385756, 0.39270344, 0.7661698, 0.23722854], + [0.24620503, 0.25431684, 0.71564585, 0.01161419, 0.846467, 0.7043044], + [0.63272387, 0.11857849, 0.3772076, 0.16758402, 0.46743023, 0.05919575]]], - - [[[0.18827082, 0.8912264, 0.6841404, 0.74436826, 0.9582085, 0.1083683 ], + [[[0.18827082, 0.8912264, 0.6841404, 0.74436826, 0.9582085, 0.1083683], [0.60695344, 0.09742349, 0.25074378, 0.87940735, 0.21116392, 0.39418384], - [0.744686, 0.35679692, 0.01308284, 0.45166633, 0.68166, 0.8634658 ], - [0.7331758, 0.21113694, 0.3935488, 0.87934476, 0.70728546, 0.09309767], - [0.12128611, 0.93696386, 0.81177396, 0.85402405, 0.5827289, 0.9776509 ]], + [0.744686, 0.35679692, 0.01308284, 0.45166633, 0.68166, 0.8634658], + [0.7331758, 0.21113694, 0.3935488, 0.87934476, 0.70728546, 0.09309767], + [0.12128611, 0.93696386, 0.81177396, 0.85402405, 
0.5827289, 0.9776509]], - [[0.54069614, 0.66651285, 0.10646132, 0.17342485, 0.88795924, 0.03551182], + [[0.54069614, 0.66651285, 0.10646132, 0.17342485, 0.88795924, 0.03551182], [0.25531697, 0.87946486, 0.74267226, 0.89230734, 0.95171434, 0.94697934], - [0.3708397, 0.507355, 0.97099817, 0.4918163, 0.17212386, 0.5008048 ], - [0.62530744, 0.25210327, 0.73966664, 0.71555346, 0.82484317, 0.6094874 ], - [0.4589691, 0.1386695, 0.27448782, 0.20373994, 0.27805242, 0.23292768]], - - [[0.7414099, 0.2270226, 0.90431255, 0.47035843, 0.9581062, 0.5359226 ], - [0.79603523, 0.45549425, 0.80858237, 0.7705133, 0.017761, 0.98001194], - [0.06013146, 0.99240226, 0.33515573, 0.04110833, 0.41470334, 0.7130743 ], - [0.5687417, 0.5788611, 0.00722461, 0.6603336, 0.3420471, 0.75181854], - [0.4699261, 0.51390815, 0.343182, 0.81498754, 0.8942413, 0.46532857]], - - [[0.4589523, 0.5534698, 0.2825786, 0.8205943, 0.78258514, 0.43154418], - [0.27020997, 0.01667354, 0.60871965, 0.90670526, 0.3208025, 0.96995634], - [0.85337156, 0.9711295, 0.1381724, 0.53670496, 0.7347996, 0.73380876], - [0.6137464, 0.54751194, 0.9037335, 0.23134394, 0.61411524, 0.26583543], - [0.70770144, 0.01813207, 0.24718016, 0.70329237, 0.7062925, 0.14399007]]]]).astype(np.float32) - - expect_dx1 = np.array([[[[ 6.6534014 ], - [ 5.649811 ], - [10.071739 ], - [ 6.6798244 ], - [ 3.0426278 ]]], - [[[ 4.2183976 ], - [ 0.8096436 ], - [ 0.6019127 ], - [ 0.11872514], - [ 0.09701069]]], - [[[ 9.573029 ], - [ 0.60534775], - [ 3.917112 ], - [ 5.9021177 ], - [ 2.263672 ]]]]).astype(np.float32) - - expect_dx2 = np.array([[[[6.4205275, 2.941831 , 5.492452 , 4.3212175, 2.4262471, 0. ]], - [[7.991917 , 2.3792431, 4.9190216, 5.2013817, 6.348791 , 8.351772 ]], - [[5.518505 , 8.401285 , 4.691043 , 6.463884 , 7.504318 , 7.620938 ]], - [[5.2708025, 1.2835244, 4.1031275, 1.9843934, 4.9320035, 4.537787 ]]]]).astype(np.float32) + [0.3708397, 0.507355, 0.97099817, 0.4918163, 0.17212386, 0.5008048], + [0.62530744, 0.25210327, 0.73966664, 0.71555346, 0.82484317, 0.6094874], + [0.4589691, 0.1386695, 0.27448782, 0.20373994, 0.27805242, 0.23292768]], + + [[0.7414099, 0.2270226, 0.90431255, 0.47035843, 0.9581062, 0.5359226], + [0.79603523, 0.45549425, 0.80858237, 0.7705133, 0.017761, 0.98001194], + [0.06013146, 0.99240226, 0.33515573, 0.04110833, 0.41470334, 0.7130743], + [0.5687417, 0.5788611, 0.00722461, 0.6603336, 0.3420471, 0.75181854], + [0.4699261, 0.51390815, 0.343182, 0.81498754, 0.8942413, 0.46532857]], + + [[0.4589523, 0.5534698, 0.2825786, 0.8205943, 0.78258514, 0.43154418], + [0.27020997, 0.01667354, 0.60871965, 0.90670526, 0.3208025, 0.96995634], + [0.85337156, 0.9711295, 0.1381724, 0.53670496, 0.7347996, 0.73380876], + [0.6137464, 0.54751194, 0.9037335, 0.23134394, 0.61411524, 0.26583543], + [0.70770144, 0.01813207, 0.24718016, 0.70329237, 0.7062925, 0.14399007]]]]).astype(np.float32) + + expect_dx1 = np.array([[[[6.6534014], + [5.649811], + [10.071739], + [6.6798244], + [3.0426278]]], + [[[4.2183976], + [0.8096436], + [0.6019127], + [0.11872514], + [0.09701069]]], + [[[9.573029], + [0.60534775], + [3.917112], + [5.9021177], + [2.263672]]]]).astype(np.float32) + + expect_dx2 = np.array([[[[6.4205275, 2.941831, 5.492452, 4.3212175, 2.4262471, 0.]], + [[7.991917, 2.3792431, 4.9190216, 5.2013817, 6.348791, 8.351772]], + [[5.518505, 8.401285, 4.691043, 6.463884, 7.504318, 7.620938]], + [[5.2708025, 1.2835244, 4.1031275, 1.9843934, 4.9320035, 4.537787]]]]).astype(np.float32) net = Grad(Net()) output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) assert 
np.allclose(output_ms[0].asnumpy(), expect_dx1) assert np.allclose(output_ms[1].asnumpy(), expect_dx2) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_broadcast_diff_dims(): context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') - x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], - [0.9803821, 0.57205725, 0.28511533]], - [[0.61111903, 0.9671023, 0.70624334], + x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], + [0.9803821, 0.57205725, 0.28511533]], + [[0.61111903, 0.9671023, 0.70624334], [0.53730786, 0.90413177, 0.94349676]]]).astype(np.float32) - x2_np = np.array([[0.01045662, 0.82126397, 0.6365063 ], - [0.9900942, 0.6584232, 0.98537433]]).astype(np.float32) + x2_np = np.array([[0.01045662, 0.82126397, 0.6365063], + [0.9900942, 0.6584232, 0.98537433]]).astype(np.float32) - dy_np = np.array([[[0.3897645, 0.61152864, 0.33675498], - [0.5303635, 0.84893036, 0.4959739 ]], - [[0.5391046, 0.8443047, 0.4174708 ], - [0.57513475, 0.9225578, 0.46760973]]]).astype(np.float32) + dy_np = np.array([[[0.3897645, 0.61152864, 0.33675498], + [0.5303635, 0.84893036, 0.4959739]], + [[0.5391046, 0.8443047, 0.4174708], + [0.57513475, 0.9225578, 0.46760973]]]).astype(np.float32) - expect_dx1 = np.array([[[0.3897645 , 0. , 0.33675498], - [0. , 0. , 0. ]], - [[0.5391046 , 0.8443047 , 0.4174708 ], - [0. , 0.9225578 , 0. ]]]).astype(np.float32) + expect_dx1 = np.array([[[0.3897645, 0., 0.33675498], + [0., 0., 0.]], + [[0.5391046, 0.8443047, 0.4174708], + [0., 0.9225578, 0.]]]).astype(np.float32) - expect_dx2 = np.array([[0. , 0.61152864, 0. ], - [1.1054983 , 0.84893036, 0.96358365]]).astype(np.float32) + expect_dx2 = np.array([[0., 0.61152864, 0.], + [1.1054983, 0.84893036, 0.96358365]]).astype(np.float32) net = Grad(Net()) output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) diff --git a/tests/st/ops/gpu/test_minimum_op.py b/tests/st/ops/gpu/test_minimum_op.py index 4616f80c19..4297c1a77d 100644 --- a/tests/st/ops/gpu/test_minimum_op.py +++ b/tests/st/ops/gpu/test_minimum_op.py @@ -22,6 +22,7 @@ import mindspore.common.dtype as mstype import mindspore.context as context import numpy as np + class MinimumNet(Cell): def __init__(self): super(MinimumNet, self).__init__() @@ -67,7 +68,7 @@ def test_nobroadcast(): def test_broadcast(): context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') - x1_np = np.array([[[[0.659578 ], + x1_np = np.array([[[[0.659578], [0.49113268], [0.75909054], [0.71681815], @@ -77,109 +78,107 @@ def test_broadcast(): [0.06398096], [0.09519596], [0.12498625]]], - [[[0.7347768 ], - [0.166469 ], - [0.328553 ], + [[[0.7347768], + [0.166469], + [0.328553], [0.54908437], [0.23673844]]]]).astype(np.float32) - x2_np = np.array([[[[0.9154968, 0.29014662, 0.6492294, 0.39918253, 0.1648203, 0.00861965]], - [[0.996885, 0.24152198, 0.3601213, 0.51664376, 0.7933056, 0.84706444]], - [[0.75606346, 0.974512, 0.3939527, 0.69697475, 0.83400667, 0.6348955 ]], - [[0.68492866, 0.24609096, 0.4924665, 0.22500521, 0.38474053, 0.5586104 ]]]]).astype(np.float32) - dy_np = np.array([[[[0.42891738, 0.03434946, 0.06192983, 0.21216309, 0.37450036, 0.6619524 ], - [0.8583447, 0.5765161, 0.1468952, 0.9975385, 0.6908136, 0.4903796 ], - [0.68952006, 0.39336833, 0.9049695, 0.66886294, 0.2338471, 0.913618 ], - [0.0428149, 0.6243054, 0.8519898, 0.12088962, 0.9735885, 0.45661286], - [0.41563734, 0.41607043, 0.4754915, 0.32207987, 0.33823156, 0.47422352]], - - [[0.64478457, 0.22430937, 0.7682554, 0.46082005, 
0.8938723, 0.20490853], - [0.44393885, 0.08278944, 0.4734108, 0.5543551, 0.39428464, 0.44424313], + x2_np = np.array([[[[0.9154968, 0.29014662, 0.6492294, 0.39918253, 0.1648203, 0.00861965]], + [[0.996885, 0.24152198, 0.3601213, 0.51664376, 0.7933056, 0.84706444]], + [[0.75606346, 0.974512, 0.3939527, 0.69697475, 0.83400667, 0.6348955]], + [[0.68492866, 0.24609096, 0.4924665, 0.22500521, 0.38474053, 0.5586104]]]]).astype(np.float32) + dy_np = np.array([[[[0.42891738, 0.03434946, 0.06192983, 0.21216309, 0.37450036, 0.6619524], + [0.8583447, 0.5765161, 0.1468952, 0.9975385, 0.6908136, 0.4903796], + [0.68952006, 0.39336833, 0.9049695, 0.66886294, 0.2338471, 0.913618], + [0.0428149, 0.6243054, 0.8519898, 0.12088962, 0.9735885, 0.45661286], + [0.41563734, 0.41607043, 0.4754915, 0.32207987, 0.33823156, 0.47422352]], + + [[0.64478457, 0.22430937, 0.7682554, 0.46082005, 0.8938723, 0.20490853], + [0.44393885, 0.08278944, 0.4734108, 0.5543551, 0.39428464, 0.44424313], [0.12612297, 0.76566416, 0.71133816, 0.81280327, 0.20583127, 0.54058075], - [0.41341263, 0.48118508, 0.00401995, 0.37259838, 0.05435474, 0.5240658 ], - [0.4081956, 0.48718935, 0.9132831, 0.67969185, 0.0119757, 0.8328054 ]], - - [[0.91695577, 0.95370644, 0.263782, 0.7477626, 0.6448147, 0.8080634 ], - [0.15576603, 0.9104615, 0.3778708, 0.6912833, 0.2092224, 0.67462957], - [0.7087075, 0.7888326, 0.4672294, 0.98221505, 0.25210258, 0.98920417], - [0.7466197, 0.22702982, 0.01991269, 0.6846591, 0.7515228, 0.5890395 ], - [0.04531088, 0.21740614, 0.8406235, 0.36480767, 0.37733936, 0.02914464]], - - [[0.33069974, 0.5497569, 0.9896345, 0.4167176, 0.78057563, 0.04659131], - [0.7747768, 0.21427679, 0.29893255, 0.7706969, 0.9755185, 0.42388415], - [0.3910244, 0.39381978, 0.37065396, 0.15558061, 0.05012341, 0.15870963], - [0.17791101, 0.47219893, 0.13899496, 0.32323205, 0.3628809, 0.02580585], - [0.30274773, 0.62890774, 0.11024303, 0.6980051, 0.35346958, 0.062852 ]]], - - - [[[0.6925081, 0.74668753, 0.80145043, 0.06598313, 0.665123, 0.15073007], - [0.11784806, 0.6385372, 0.5228278, 0.5349848, 0.84671104, 0.8096436 ], - [0.09516156, 0.63298017, 0.52382874, 0.36734378, 0.66497755, 0.6019127 ], - [0.46438488, 0.0194377, 0.9388292, 0.7286089, 0.29178405, 0.11872514], - [0.22101837, 0.6164887, 0.6139798, 0.11711904, 0.6227745, 0.09701069]], - - [[0.80480653, 0.90034056, 0.8633447, 0.97415197, 0.08309154, 0.8446033 ], - [0.9473769, 0.791024, 0.26339203, 0.01155075, 0.2673186, 0.7116369 ], - [0.9687511, 0.24281934, 0.37777108, 0.09802654, 0.2421312, 0.87095344], - [0.6311381, 0.23368953, 0.0998995, 0.4364419, 0.9187446, 0.5043872 ], - [0.35226053, 0.09357589, 0.41317305, 0.85930043, 0.16249318, 0.5478765 ]], - - [[0.14338651, 0.24859418, 0.4246941, 0.73034066, 0.47172204, 0.8717199 ], - [0.05415315, 0.78556925, 0.99214983, 0.7415298, 0.673708, 0.87817156], - [0.616975, 0.42843062, 0.05179814, 0.1566958, 0.04536059, 0.70166487], - [0.15493333, 0.776598, 0.4361967, 0.40253627, 0.89210516, 0.8144414 ], - [0.04816005, 0.29696834, 0.4586605, 0.3419852, 0.5595613, 0.74093205]], - - [[0.1388035, 0.9168704, 0.64287645, 0.83864623, 0.48026922, 0.78323376], + [0.41341263, 0.48118508, 0.00401995, 0.37259838, 0.05435474, 0.5240658], + [0.4081956, 0.48718935, 0.9132831, 0.67969185, 0.0119757, 0.8328054]], + + [[0.91695577, 0.95370644, 0.263782, 0.7477626, 0.6448147, 0.8080634], + [0.15576603, 0.9104615, 0.3778708, 0.6912833, 0.2092224, 0.67462957], + [0.7087075, 0.7888326, 0.4672294, 0.98221505, 0.25210258, 0.98920417], + [0.7466197, 0.22702982, 0.01991269, 0.6846591, 
0.7515228, 0.5890395], + [0.04531088, 0.21740614, 0.8406235, 0.36480767, 0.37733936, 0.02914464]], + + [[0.33069974, 0.5497569, 0.9896345, 0.4167176, 0.78057563, 0.04659131], + [0.7747768, 0.21427679, 0.29893255, 0.7706969, 0.9755185, 0.42388415], + [0.3910244, 0.39381978, 0.37065396, 0.15558061, 0.05012341, 0.15870963], + [0.17791101, 0.47219893, 0.13899496, 0.32323205, 0.3628809, 0.02580585], + [0.30274773, 0.62890774, 0.11024303, 0.6980051, 0.35346958, 0.062852]]], + + [[[0.6925081, 0.74668753, 0.80145043, 0.06598313, 0.665123, 0.15073007], + [0.11784806, 0.6385372, 0.5228278, 0.5349848, 0.84671104, 0.8096436], + [0.09516156, 0.63298017, 0.52382874, 0.36734378, 0.66497755, 0.6019127], + [0.46438488, 0.0194377, 0.9388292, 0.7286089, 0.29178405, 0.11872514], + [0.22101837, 0.6164887, 0.6139798, 0.11711904, 0.6227745, 0.09701069]], + + [[0.80480653, 0.90034056, 0.8633447, 0.97415197, 0.08309154, 0.8446033], + [0.9473769, 0.791024, 0.26339203, 0.01155075, 0.2673186, 0.7116369], + [0.9687511, 0.24281934, 0.37777108, 0.09802654, 0.2421312, 0.87095344], + [0.6311381, 0.23368953, 0.0998995, 0.4364419, 0.9187446, 0.5043872], + [0.35226053, 0.09357589, 0.41317305, 0.85930043, 0.16249318, 0.5478765]], + + [[0.14338651, 0.24859418, 0.4246941, 0.73034066, 0.47172204, 0.8717199], + [0.05415315, 0.78556925, 0.99214983, 0.7415298, 0.673708, 0.87817156], + [0.616975, 0.42843062, 0.05179814, 0.1566958, 0.04536059, 0.70166487], + [0.15493333, 0.776598, 0.4361967, 0.40253627, 0.89210516, 0.8144414], + [0.04816005, 0.29696834, 0.4586605, 0.3419852, 0.5595613, 0.74093205]], + + [[0.1388035, 0.9168704, 0.64287645, 0.83864623, 0.48026922, 0.78323376], [0.12724937, 0.83034366, 0.42557436, 0.50578654, 0.25630295, 0.15349793], - [0.27256685, 0.04547984, 0.5385756, 0.39270344, 0.7661698, 0.23722854], - [0.24620503, 0.25431684, 0.71564585, 0.01161419, 0.846467, 0.7043044 ], - [0.63272387, 0.11857849, 0.3772076, 0.16758402, 0.46743023, 0.05919575]]], - + [0.27256685, 0.04547984, 0.5385756, 0.39270344, 0.7661698, 0.23722854], + [0.24620503, 0.25431684, 0.71564585, 0.01161419, 0.846467, 0.7043044], + [0.63272387, 0.11857849, 0.3772076, 0.16758402, 0.46743023, 0.05919575]]], - [[[0.18827082, 0.8912264, 0.6841404, 0.74436826, 0.9582085, 0.1083683 ], + [[[0.18827082, 0.8912264, 0.6841404, 0.74436826, 0.9582085, 0.1083683], [0.60695344, 0.09742349, 0.25074378, 0.87940735, 0.21116392, 0.39418384], - [0.744686, 0.35679692, 0.01308284, 0.45166633, 0.68166, 0.8634658 ], - [0.7331758, 0.21113694, 0.3935488, 0.87934476, 0.70728546, 0.09309767], - [0.12128611, 0.93696386, 0.81177396, 0.85402405, 0.5827289, 0.9776509 ]], + [0.744686, 0.35679692, 0.01308284, 0.45166633, 0.68166, 0.8634658], + [0.7331758, 0.21113694, 0.3935488, 0.87934476, 0.70728546, 0.09309767], + [0.12128611, 0.93696386, 0.81177396, 0.85402405, 0.5827289, 0.9776509]], - [[0.54069614, 0.66651285, 0.10646132, 0.17342485, 0.88795924, 0.03551182], + [[0.54069614, 0.66651285, 0.10646132, 0.17342485, 0.88795924, 0.03551182], [0.25531697, 0.87946486, 0.74267226, 0.89230734, 0.95171434, 0.94697934], - [0.3708397, 0.507355, 0.97099817, 0.4918163, 0.17212386, 0.5008048 ], - [0.62530744, 0.25210327, 0.73966664, 0.71555346, 0.82484317, 0.6094874 ], - [0.4589691, 0.1386695, 0.27448782, 0.20373994, 0.27805242, 0.23292768]], - - [[0.7414099, 0.2270226, 0.90431255, 0.47035843, 0.9581062, 0.5359226 ], - [0.79603523, 0.45549425, 0.80858237, 0.7705133, 0.017761, 0.98001194], - [0.06013146, 0.99240226, 0.33515573, 0.04110833, 0.41470334, 0.7130743 ], - [0.5687417, 0.5788611, 
0.00722461, 0.6603336, 0.3420471, 0.75181854], - [0.4699261, 0.51390815, 0.343182, 0.81498754, 0.8942413, 0.46532857]], - - [[0.4589523, 0.5534698, 0.2825786, 0.8205943, 0.78258514, 0.43154418], - [0.27020997, 0.01667354, 0.60871965, 0.90670526, 0.3208025, 0.96995634], - [0.85337156, 0.9711295, 0.1381724, 0.53670496, 0.7347996, 0.73380876], - [0.6137464, 0.54751194, 0.9037335, 0.23134394, 0.61411524, 0.26583543], - [0.70770144, 0.01813207, 0.24718016, 0.70329237, 0.7062925, 0.14399007]]]]).astype(np.float32) - - expect_dx1 = np.array([[[[ 5.7664223], - [ 6.981018 ], - [ 2.6029902], - [ 2.7598202], - [ 6.763105 ]]], - [[[10.06558 ], - [12.077246 ], - [ 9.338394 ], - [11.52271 ], - [ 8.889048 ]]], - [[[ 3.5789769], - [13.424448 ], - [ 8.732746 ], - [ 6.9677467], - [ 9.635765 ]]]]).astype(np.float32) - - expect_dx2 = np.array([[[[0. , 4.250458 , 2.5030296 , 3.623167 , 6.4171505 , 7.2115746 ]], - [[0. , 4.367449 , 2.803152 , 2.5352 , 0. , 0. ]], - [[0.7087075 , 0. , 2.040332 , 2.1372325 , 0. , 2.9222295 ]], - [[1.0278877 , 5.247942 , 2.6855955 , 5.494814 , 3.5657988 , 0.66265094]]]]).astype(np.float32) + [0.3708397, 0.507355, 0.97099817, 0.4918163, 0.17212386, 0.5008048], + [0.62530744, 0.25210327, 0.73966664, 0.71555346, 0.82484317, 0.6094874], + [0.4589691, 0.1386695, 0.27448782, 0.20373994, 0.27805242, 0.23292768]], + + [[0.7414099, 0.2270226, 0.90431255, 0.47035843, 0.9581062, 0.5359226], + [0.79603523, 0.45549425, 0.80858237, 0.7705133, 0.017761, 0.98001194], + [0.06013146, 0.99240226, 0.33515573, 0.04110833, 0.41470334, 0.7130743], + [0.5687417, 0.5788611, 0.00722461, 0.6603336, 0.3420471, 0.75181854], + [0.4699261, 0.51390815, 0.343182, 0.81498754, 0.8942413, 0.46532857]], + + [[0.4589523, 0.5534698, 0.2825786, 0.8205943, 0.78258514, 0.43154418], + [0.27020997, 0.01667354, 0.60871965, 0.90670526, 0.3208025, 0.96995634], + [0.85337156, 0.9711295, 0.1381724, 0.53670496, 0.7347996, 0.73380876], + [0.6137464, 0.54751194, 0.9037335, 0.23134394, 0.61411524, 0.26583543], + [0.70770144, 0.01813207, 0.24718016, 0.70329237, 0.7062925, 0.14399007]]]]).astype(np.float32) + + expect_dx1 = np.array([[[[5.7664223], + [6.981018], + [2.6029902], + [2.7598202], + [6.763105]]], + [[[10.06558], + [12.077246], + [9.338394], + [11.52271], + [8.889048]]], + [[[3.5789769], + [13.424448], + [8.732746], + [6.9677467], + [9.635765]]]]).astype(np.float32) + + expect_dx2 = np.array([[[[0., 4.250458, 2.5030296, 3.623167, 6.4171505, 7.2115746]], + [[0., 4.367449, 2.803152, 2.5352, 0., 0.]], + [[0.7087075, 0., 2.040332, 2.1372325, 0., 2.9222295]], + [[1.0278877, 5.247942, 2.6855955, 5.494814, 3.5657988, 0.66265094]]]]).astype(np.float32) net = Grad(MinimumNet()) output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) @@ -193,26 +192,26 @@ def test_broadcast(): def test_broadcast_diff_dims(): context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') - x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], - [0.9803821, 0.57205725, 0.28511533]], - [[0.61111903, 0.9671023, 0.70624334], + x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], + [0.9803821, 0.57205725, 0.28511533]], + [[0.61111903, 0.9671023, 0.70624334], [0.53730786, 0.90413177, 0.94349676]]]).astype(np.float32) - x2_np = np.array([[0.01045662, 0.82126397, 0.6365063 ], - [0.9900942, 0.6584232, 0.98537433]]).astype(np.float32) + x2_np = np.array([[0.01045662, 0.82126397, 0.6365063], + [0.9900942, 0.6584232, 0.98537433]]).astype(np.float32) - dy_np = np.array([[[0.3897645, 0.61152864, 0.33675498], - [0.5303635, 0.84893036, 
0.4959739 ]], - [[0.5391046, 0.8443047, 0.4174708 ], - [0.57513475, 0.9225578, 0.46760973]]]).astype(np.float32) + dy_np = np.array([[[0.3897645, 0.61152864, 0.33675498], + [0.5303635, 0.84893036, 0.4959739]], + [[0.5391046, 0.8443047, 0.4174708], + [0.57513475, 0.9225578, 0.46760973]]]).astype(np.float32) - expect_dx1 = np.array([[[0. , 0.61152864, 0. ], - [0.5303635 , 0.84893036, 0.4959739 ]], - [[0. , 0. , 0. ], - [0.57513475, 0. , 0.46760973]]]).astype(np.float32) + expect_dx1 = np.array([[[0., 0.61152864, 0.], + [0.5303635, 0.84893036, 0.4959739]], + [[0., 0., 0.], + [0.57513475, 0., 0.46760973]]]).astype(np.float32) - expect_dx2 = np.array([[0.92886907, 0.8443047 , 0.7542258 ], - [0. , 0.9225578 , 0. ]]).astype(np.float32) + expect_dx2 = np.array([[0.92886907, 0.8443047, 0.7542258], + [0., 0.9225578, 0.]]).astype(np.float32) net = Grad(MinimumNet()) output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) diff --git a/tests/st/ops/gpu/test_momentum_op.py b/tests/st/ops/gpu/test_momentum_op.py index d2b992003b..b3e248c2c9 100644 --- a/tests/st/ops/gpu/test_momentum_op.py +++ b/tests/st/ops/gpu/test_momentum_op.py @@ -25,6 +25,8 @@ from mindspore.common.initializer import initializer import mindspore.context as context context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + + class NetMomentum(nn.Cell): def __init__(self): super(NetMomentum, self).__init__() @@ -38,6 +40,7 @@ class NetMomentum(nn.Cell): output = self.fc1(output) return output + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_realdiv_op.py b/tests/st/ops/gpu/test_realdiv_op.py index e5dcc05190..ce911e2bb6 100644 --- a/tests/st/ops/gpu/test_realdiv_op.py +++ b/tests/st/ops/gpu/test_realdiv_op.py @@ -23,6 +23,7 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class NetRealDiv(nn.Cell): def __init__(self): super(NetRealDiv, self).__init__() diff --git a/tests/st/ops/gpu/test_reciprocal_op.py b/tests/st/ops/gpu/test_reciprocal_op.py index fb422a94cf..c90dad5f54 100644 --- a/tests/st/ops/gpu/test_reciprocal_op.py +++ b/tests/st/ops/gpu/test_reciprocal_op.py @@ -29,6 +29,7 @@ class NetReciprocal(nn.Cell): def construct(self, x): return self.reciprocal(x) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_select_op.py b/tests/st/ops/gpu/test_select_op.py index 5cac6a6ad3..924f44504c 100644 --- a/tests/st/ops/gpu/test_select_op.py +++ b/tests/st/ops/gpu/test_select_op.py @@ -29,10 +29,12 @@ class Net(nn.Cell): def construct(self, cond, x, y): return self.select(cond, x, y) + cond = np.array([[True, False], [True, False]]).astype(np.bool) x = np.array([[1.2, 1], [1, 0]]).astype(np.float32) y = np.array([[1, 2], [3, 4.0]]).astype(np.float32) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_slice.py b/tests/st/ops/gpu/test_slice.py index 9846399481..110c7c87b9 100644 --- a/tests/st/ops/gpu/test_slice.py +++ b/tests/st/ops/gpu/test_slice.py @@ -53,6 +53,7 @@ class SliceNet(nn.Cell): def construct(self, x): return self.slice(x, (0, 11, 0, 0), (32, 7, 224, 224)) + def test_slice_4d(): x_np = np.random.randn(32, 24, 224, 224).astype(np.float32) output_np = x_np[:, 11:18, :, :] diff --git a/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py b/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py 
index 1d04634957..32034456af 100644 --- a/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py +++ b/tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py @@ -22,8 +22,9 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class NetSoftmaxCrossEntropyWithLogits(nn.Cell): - def __init__( self): + def __init__(self): super(NetSoftmaxCrossEntropyWithLogits, self).__init__() self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False) @@ -35,12 +36,12 @@ class NetSoftmaxCrossEntropyWithLogits(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_softmax_cross_entropy_with_logits(): - logits = Tensor(np.array([[1,1,10], - [1,10,1], - [10,1,1]]).astype(np.float32)) - labels = Tensor(np.array([[0,0,1], - [0,1,0], - [1,0,0]]).astype(np.float32)) + logits = Tensor(np.array([[1, 1, 10], + [1, 10, 1], + [10, 1, 1]]).astype(np.float32)) + labels = Tensor(np.array([[0, 0, 1], + [0, 1, 0], + [1, 0, 0]]).astype(np.float32)) expect_loss = [0.00024673, 0.00024673, 0.00024673] context.set_context(mode=context.GRAPH_MODE, device_target='GPU') diff --git a/tests/st/ops/gpu/test_softmax_op.py b/tests/st/ops/gpu/test_softmax_op.py index 6c7ae0c5ee..2e204432a3 100644 --- a/tests/st/ops/gpu/test_softmax_op.py +++ b/tests/st/ops/gpu/test_softmax_op.py @@ -21,6 +21,7 @@ from mindspore.ops import composite as C import mindspore.nn as nn import mindspore.context as context + class NetSoftmax(nn.Cell): def __init__(self): super(NetSoftmax, self).__init__() @@ -73,6 +74,7 @@ class Net(nn.Cell): def construct(self, x): return self.softmax1(x) + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -89,105 +91,107 @@ class Grad(nn.Cell): @pytest.mark.env_onecard def test_softmax_4d(): context.set_context(mode=context.GRAPH_MODE, device_target="GPU") - x = np.array([[[[ 2.7866030e-01, 8.5578346e-01, -2.7546784e-01, -8.5833269e-01, 1.5753637e-01], - [-4.5145524e-01, 1.5590921e-01, -6.1947298e-01, -6.3499230e-01, -1.0625143e+00], - [-6.8716180e-01, -3.5565588e-01, 9.9680430e-01, -3.5519487e-01, 5.2122700e-01], - [-9.8125875e-01, 9.0505141e-01, 6.5961617e-01, 6.5950197e-01, 1.0319239e+00]], - [[-7.6588345e-01, -1.6929083e-01, 9.4459933e-01, -8.3931917e-01, 1.4916732e+00], - [ 8.1874236e-02, -1.9288104e-02, 7.3255712e-01, -1.4598954e-01, 1.1225560e+00], - [ 2.7356184e-01, 1.2557162e-01, 1.3796539e+00, 1.0073920e-01, 7.9203087e-01], - [-3.6947381e-01, 4.7919992e-01, 2.2421131e+00, -8.3911163e-01, 1.0814662e+00]], - [[-2.5838584e-01, 2.0765430e-01, -1.9366746e-01, 6.7511219e-01, -3.7492469e-01], - [ 4.4170797e-01, -9.9537361e-01, -3.5100895e-01, -7.8317386e-01, 1.1672008e-02], - [ 1.6037937e+00, -1.7059358e+00, -9.3724984e-01, -1.5016698e+00, -2.7605603e-02], - [ 1.6392696e-01, 1.0074581e+00, -2.7704465e+00, 8.1361882e-02, 7.9730105e-01]]], - [[[ 2.9516423e-01, 4.6354745e-02, 1.7318316e-01, 1.5894413e+00, -1.2769363e+00], - [ 2.8939021e-01, -3.8801813e-01, -1.3376296e+00, -4.9808905e-01, -3.2318991e-02], - [-1.1740140e+00, -1.1140432e+00, -1.4198960e-01, 5.8953021e-02, -3.6763316e-01], - [ 1.8660797e+00, -5.8705074e-01, 6.8757606e-01, -4.0573463e-01, -7.1130061e-01]], - [[ 2.6170531e-01, 5.4814044e-02, 1.3891056e-01, 3.4492522e-02, -1.0920379e-01], - [ 1.1420644e-01, 1.6939731e-01, -1.0413316e+00, -1.4040415e-01, -3.3280477e-01], - [-3.0776244e-01, 1.0526397e+00, 2.9497927e-01, 1.1266683e+00, 8.4419928e-02], - [-2.1593940e+00, -1.0187222e+00, 1.7475771e+00, 
-3.5802367e-01, -1.2900480e+00]], - [[ 3.2892069e-01, -1.6604670e+00, -5.7856506e-01, 5.8143520e-01, 5.9596705e-01], - [-1.5992336e-01, -5.9647644e-01, 1.2957820e+00, -1.0650631e-01, 7.0879894e-01], - [ 4.1372257e-01, 3.6408889e-01, -6.3091749e-01, 1.0573713e+00, 1.0981073e+00], - [-1.9162457e-01, 3.6392561e-05, -1.8338780e-01, 1.7549801e+00, -9.3534666e-01]]]]).astype(np.float32) - - dy = np.array([[[[ 2.98213929e-01, 3.10518718e+00, -1.64306939e-01, -7.33681679e-01, 5.23136854e-02], - [-3.47142726e-01, -1.52662742e+00, 5.26977003e-01, 5.29672280e-02, -4.34386432e-01], - [ 1.34674394e+00, 1.69386661e+00, 3.17139983e-01, 5.77129781e-01, 1.25290680e+00], - [-1.71099675e+00, -1.62872851e+00, -7.89083183e-01, 8.64615321e-01, -1.74364686e+00]], - [[ 1.11915946e+00, -7.06878662e-01, -6.71557069e-01, -4.50884640e-01, 2.95763493e-01], - [-7.64747679e-01, 1.62951392e-03, -2.84069944e-02, 7.55402744e-01, -1.02387452e+00], - [-5.92088878e-01, 4.47980821e-01, 4.50127304e-01, -3.99038166e-01, -5.24561822e-01], - [ 1.92535609e-01, 2.44671494e-01, -8.70469391e-01, -8.30129832e-02, -4.04477213e-03]], - [[-1.94159836e-01, -8.50215256e-01, -1.01224804e+00, 2.64235616e-01, 5.34391068e-02], - [-6.71353936e-01, 3.73690695e-01, 4.48037744e-01, -2.84973383e-01, -2.80129910e+00], - [ 6.69475198e-01, 2.08404279e+00, 4.49459851e-01, 2.50908136e+00, 9.80683088e-01], - [ 1.18290365e+00, -1.28790128e+00, -1.70202863e+00, -1.37078688e-01, 9.53227460e-01]]], - [[[-6.44128084e-01, 1.37707603e+00, -8.60912442e-01, -3.83467346e-01, 6.68365955e-01], - [-3.32795471e-01, 3.05202007e-01, 2.20850635e+00, 6.93960607e-01, -1.94968760e-01], - [-3.35764170e-01, 1.10562348e+00, -1.13264215e+00, -1.08296621e+00, -6.53923571e-01], - [-4.64974046e-01, 8.83257568e-01, -1.70353889e+00, -4.48120385e-01, -1.76938546e+00]], + x = np.array([[[[2.7866030e-01, 8.5578346e-01, -2.7546784e-01, -8.5833269e-01, 1.5753637e-01], + [-4.5145524e-01, 1.5590921e-01, -6.1947298e-01, -6.3499230e-01, -1.0625143e+00], + [-6.8716180e-01, -3.5565588e-01, 9.9680430e-01, -3.5519487e-01, 5.2122700e-01], + [-9.8125875e-01, 9.0505141e-01, 6.5961617e-01, 6.5950197e-01, 1.0319239e+00]], + [[-7.6588345e-01, -1.6929083e-01, 9.4459933e-01, -8.3931917e-01, 1.4916732e+00], + [8.1874236e-02, -1.9288104e-02, 7.3255712e-01, -1.4598954e-01, 1.1225560e+00], + [2.7356184e-01, 1.2557162e-01, 1.3796539e+00, 1.0073920e-01, 7.9203087e-01], + [-3.6947381e-01, 4.7919992e-01, 2.2421131e+00, -8.3911163e-01, 1.0814662e+00]], + [[-2.5838584e-01, 2.0765430e-01, -1.9366746e-01, 6.7511219e-01, -3.7492469e-01], + [4.4170797e-01, -9.9537361e-01, -3.5100895e-01, -7.8317386e-01, 1.1672008e-02], + [1.6037937e+00, -1.7059358e+00, -9.3724984e-01, -1.5016698e+00, -2.7605603e-02], + [1.6392696e-01, 1.0074581e+00, -2.7704465e+00, 8.1361882e-02, 7.9730105e-01]]], + [[[2.9516423e-01, 4.6354745e-02, 1.7318316e-01, 1.5894413e+00, -1.2769363e+00], + [2.8939021e-01, -3.8801813e-01, -1.3376296e+00, -4.9808905e-01, -3.2318991e-02], + [-1.1740140e+00, -1.1140432e+00, -1.4198960e-01, 5.8953021e-02, -3.6763316e-01], + [1.8660797e+00, -5.8705074e-01, 6.8757606e-01, -4.0573463e-01, -7.1130061e-01]], + [[2.6170531e-01, 5.4814044e-02, 1.3891056e-01, 3.4492522e-02, -1.0920379e-01], + [1.1420644e-01, 1.6939731e-01, -1.0413316e+00, -1.4040415e-01, -3.3280477e-01], + [-3.0776244e-01, 1.0526397e+00, 2.9497927e-01, 1.1266683e+00, 8.4419928e-02], + [-2.1593940e+00, -1.0187222e+00, 1.7475771e+00, -3.5802367e-01, -1.2900480e+00]], + [[3.2892069e-01, -1.6604670e+00, -5.7856506e-01, 5.8143520e-01, 5.9596705e-01], + 
[-1.5992336e-01, -5.9647644e-01, 1.2957820e+00, -1.0650631e-01, 7.0879894e-01], + [4.1372257e-01, 3.6408889e-01, -6.3091749e-01, 1.0573713e+00, 1.0981073e+00], + [-1.9162457e-01, 3.6392561e-05, -1.8338780e-01, 1.7549801e+00, -9.3534666e-01]]]]).astype( + np.float32) + + dy = np.array([[[[2.98213929e-01, 3.10518718e+00, -1.64306939e-01, -7.33681679e-01, 5.23136854e-02], + [-3.47142726e-01, -1.52662742e+00, 5.26977003e-01, 5.29672280e-02, -4.34386432e-01], + [1.34674394e+00, 1.69386661e+00, 3.17139983e-01, 5.77129781e-01, 1.25290680e+00], + [-1.71099675e+00, -1.62872851e+00, -7.89083183e-01, 8.64615321e-01, -1.74364686e+00]], + [[1.11915946e+00, -7.06878662e-01, -6.71557069e-01, -4.50884640e-01, 2.95763493e-01], + [-7.64747679e-01, 1.62951392e-03, -2.84069944e-02, 7.55402744e-01, -1.02387452e+00], + [-5.92088878e-01, 4.47980821e-01, 4.50127304e-01, -3.99038166e-01, -5.24561822e-01], + [1.92535609e-01, 2.44671494e-01, -8.70469391e-01, -8.30129832e-02, -4.04477213e-03]], + [[-1.94159836e-01, -8.50215256e-01, -1.01224804e+00, 2.64235616e-01, 5.34391068e-02], + [-6.71353936e-01, 3.73690695e-01, 4.48037744e-01, -2.84973383e-01, -2.80129910e+00], + [6.69475198e-01, 2.08404279e+00, 4.49459851e-01, 2.50908136e+00, 9.80683088e-01], + [1.18290365e+00, -1.28790128e+00, -1.70202863e+00, -1.37078688e-01, 9.53227460e-01]]], + [[[-6.44128084e-01, 1.37707603e+00, -8.60912442e-01, -3.83467346e-01, 6.68365955e-01], + [-3.32795471e-01, 3.05202007e-01, 2.20850635e+00, 6.93960607e-01, -1.94968760e-01], + [-3.35764170e-01, 1.10562348e+00, -1.13264215e+00, -1.08296621e+00, -6.53923571e-01], + [-4.64974046e-01, 8.83257568e-01, -1.70353889e+00, -4.48120385e-01, -1.76938546e+00]], [[-3.80976290e-01, -1.49393475e+00, -8.51393223e-01, -1.49780405e+00, -1.24160886e-01], - [-7.18508661e-02, 2.44543999e-01, 3.29225749e-01, 7.09274471e-01, -9.26648498e-01], - [ 6.67312503e-01, -1.08737612e+00, -9.63039994e-01, -3.22715081e-02, -4.03802067e-01], - [-5.97982287e-01, -1.40739769e-01, 2.80631828e+00, 5.72278857e-01, 2.05998325e+00]], - [[ 3.46207246e-02, 7.34213948e-01, 1.45563519e+00, 1.02045703e+00, 1.40984225e+00], - [ 4.14457440e-01, -8.74118507e-01, -4.21902031e-01, 7.87168801e-01, -1.48280108e+00], - [ 1.42688036e+00, -2.02695489e+00, 9.26816165e-01, 9.37691629e-01, 7.85577714e-01], - [-6.59893751e-01, 1.14681525e-02, -5.79456389e-01, -1.65206456e+00, 4.37116653e-01]]]]).astype(np.float32) - - expect_x = np.array([[[[0.21919312, 0.3903627, 0.12594244, 0.07031325, 0.19418849], - [0.19778392, 0.36304963, 0.16719443, 0.1646197, 0.10735231], - [0.07986113, 0.11125171, 0.43020225, 0.11130301, 0.26738194], - [0.03936873, 0.25963634, 0.20313013, 0.20310691, 0.29475793]], - [[0.05308856, 0.09640461, 0.29366633, 0.04932966, 0.50751084], - [0.13426398, 0.12134594, 0.2573638, 0.10690536, 0.38012096], - [0.13503104, 0.11645612, 0.40813455, 0.11359984, 0.22677852], - [0.04576753, 0.10693795, 0.6233836, 0.02861518, 0.19529575]], - [[0.14096586, 0.2246532, 0.15039064, 0.35853124, 0.12545899], - [0.37957698, 0.09019516, 0.17180163, 0.11151683, 0.2469094 ], - [0.7375885, 0.0269412, 0.05811028, 0.03304673, 0.14431332], - [0.16174863, 0.37599453, 0.00859921, 0.1489303, 0.3047274 ]]], - [[[0.15335402, 0.11957449, 0.13574363, 0.55949026, 0.03183762], - [0.34669915, 0.17609946, 0.06813136, 0.15774474, 0.2513253 ], - [0.09487908, 0.10074313, 0.26630113, 0.32556766, 0.21250896], - [0.6357843, 0.05469263, 0.19565557, 0.0655652, 0.0483023 ]], + [-7.18508661e-02, 2.44543999e-01, 3.29225749e-01, 7.09274471e-01, -9.26648498e-01], + [6.67312503e-01, 
-1.08737612e+00, -9.63039994e-01, -3.22715081e-02, -4.03802067e-01], + [-5.97982287e-01, -1.40739769e-01, 2.80631828e+00, 5.72278857e-01, 2.05998325e+00]], + [[3.46207246e-02, 7.34213948e-01, 1.45563519e+00, 1.02045703e+00, 1.40984225e+00], + [4.14457440e-01, -8.74118507e-01, -4.21902031e-01, 7.87168801e-01, -1.48280108e+00], + [1.42688036e+00, -2.02695489e+00, 9.26816165e-01, 9.37691629e-01, 7.85577714e-01], + [-6.59893751e-01, 1.14681525e-02, -5.79456389e-01, -1.65206456e+00, 4.37116653e-01]]]]).astype( + np.float32) + + expect_x = np.array([[[[0.21919312, 0.3903627, 0.12594244, 0.07031325, 0.19418849], + [0.19778392, 0.36304963, 0.16719443, 0.1646197, 0.10735231], + [0.07986113, 0.11125171, 0.43020225, 0.11130301, 0.26738194], + [0.03936873, 0.25963634, 0.20313013, 0.20310691, 0.29475793]], + [[0.05308856, 0.09640461, 0.29366633, 0.04932966, 0.50751084], + [0.13426398, 0.12134594, 0.2573638, 0.10690536, 0.38012096], + [0.13503104, 0.11645612, 0.40813455, 0.11359984, 0.22677852], + [0.04576753, 0.10693795, 0.6233836, 0.02861518, 0.19529575]], + [[0.14096586, 0.2246532, 0.15039064, 0.35853124, 0.12545899], + [0.37957698, 0.09019516, 0.17180163, 0.11151683, 0.2469094], + [0.7375885, 0.0269412, 0.05811028, 0.03304673, 0.14431332], + [0.16174863, 0.37599453, 0.00859921, 0.1489303, 0.3047274]]], + [[[0.15335402, 0.11957449, 0.13574363, 0.55949026, 0.03183762], + [0.34669915, 0.17609946, 0.06813136, 0.15774474, 0.2513253], + [0.09487908, 0.10074313, 0.26630113, 0.32556766, 0.21250896], + [0.6357843, 0.05469263, 0.19565557, 0.0655652, 0.0483023]], [[0.23898226, 0.19431841, 0.21136671, 0.19040942, 0.16492325], - [0.2641041, 0.27909, 0.08316323, 0.20473833, 0.16890427], - [0.08062991, 0.3142761, 0.14732064, 0.33842432, 0.11934903], - [0.01604616, 0.05020634, 0.79826504, 0.09720672, 0.03827571]], + [0.2641041, 0.27909, 0.08316323, 0.20473833, 0.16890427], + [0.08062991, 0.3142761, 0.14732064, 0.33842432, 0.11934903], + [0.01604616, 0.05020634, 0.79826504, 0.09720672, 0.03827571]], [[0.24191543, 0.03308899, 0.09762195, 0.31140763, 0.31596598], - [0.10669514, 0.06895282, 0.45745608, 0.11254943, 0.25434658], - [0.16156755, 0.15374413, 0.05684244, 0.3075298, 0.32031605], - [0.09346025, 0.11320464, 0.09423324, 0.65467626, 0.04442552]]]]).astype(np.float32) - - expect_dx = np.array([[[[-0.20103945, 0.737705 , -0.17376284, -0.1370458 , -0.22585672], - [ 0.04461281, -0.34632078, 0.18386088, 0.10299816, 0.01484894], - [ 0.04113413, 0.09592049, -0.22135337, -0.02833145, 0.11263024], - [-0.0284293 , -0.1661311 , 0.04058228, 0.37645525, -0.22247711]], - [[ 0.06355994, -0.06061868, -0.17428297, -0.01839012, 0.1897318 ], - [-0.04652473, 0.05094835, 0.10032654, 0.12546772, -0.23021786], - [-0.07882182, 0.05314343, 0.18712361, -0.04438123, -0.11706398], - [ 0.03219109, 0.08079126, -0.22419631, 0.01224192, 0.09897206]], - [[ 0.01057316, -0.1305348 , -0.11175273, 0.19124077, 0.04047358], - [ 0.07448982, 0.11195826, 0.2260284 , 0.06497248, -0.47744888], - [-0.09664576, 0.03458005, -0.02039931, 0.05646288, 0.02600216], - [ 0.1973966 , -0.47014874, -0.01431374, -0.01483214, 0.30189803]]], - [[[-0.06132338, 0.19386888, -0.08370841, -0.07789247, 0.02905542], - [-0.16714299, 0.0274538 , 0.14029635, 0.08591694, -0.08652411], - [ 0.03585254, 0.18327834, -0.11158065, -0.12024056, 0.01269035], - [ 0.14654502, 0.0863447 , -0.19723451, 0.01621746, -0.05187264]], - [[ 0.11614501, -0.12182987, 0.00329342, -0.12011584, 0.12250728], - [-0.03623635, 0.05001016, 0.02194443, 0.13183522, -0.16755345], - [ 0.09322704, -0.18807998, 
-0.06984743, 0.15454148, 0.01015892], - [-0.04743218, -0.12545264, 0.35787603, -0.1735842 , -0.01140684]], - [[-0.21854429, -0.00674347, 0.05053139, 0.02567403, 0.14908233], - [ 0.09731252, -0.02596174, 0.03463032, 0.14460044, -0.2505815 ], - [ 0.1478814 , -0.3902862 , 0.02360253, 0.13103928, 0.087763 ], - [ 0.04834083, 0.13455458, 0.05632052, -0.3109298 , 0.07171366]]]]).astype(np.float32) + [0.10669514, 0.06895282, 0.45745608, 0.11254943, 0.25434658], + [0.16156755, 0.15374413, 0.05684244, 0.3075298, 0.32031605], + [0.09346025, 0.11320464, 0.09423324, 0.65467626, 0.04442552]]]]).astype(np.float32) + + expect_dx = np.array([[[[-0.20103945, 0.737705, -0.17376284, -0.1370458, -0.22585672], + [0.04461281, -0.34632078, 0.18386088, 0.10299816, 0.01484894], + [0.04113413, 0.09592049, -0.22135337, -0.02833145, 0.11263024], + [-0.0284293, -0.1661311, 0.04058228, 0.37645525, -0.22247711]], + [[0.06355994, -0.06061868, -0.17428297, -0.01839012, 0.1897318], + [-0.04652473, 0.05094835, 0.10032654, 0.12546772, -0.23021786], + [-0.07882182, 0.05314343, 0.18712361, -0.04438123, -0.11706398], + [0.03219109, 0.08079126, -0.22419631, 0.01224192, 0.09897206]], + [[0.01057316, -0.1305348, -0.11175273, 0.19124077, 0.04047358], + [0.07448982, 0.11195826, 0.2260284, 0.06497248, -0.47744888], + [-0.09664576, 0.03458005, -0.02039931, 0.05646288, 0.02600216], + [0.1973966, -0.47014874, -0.01431374, -0.01483214, 0.30189803]]], + [[[-0.06132338, 0.19386888, -0.08370841, -0.07789247, 0.02905542], + [-0.16714299, 0.0274538, 0.14029635, 0.08591694, -0.08652411], + [0.03585254, 0.18327834, -0.11158065, -0.12024056, 0.01269035], + [0.14654502, 0.0863447, -0.19723451, 0.01621746, -0.05187264]], + [[0.11614501, -0.12182987, 0.00329342, -0.12011584, 0.12250728], + [-0.03623635, 0.05001016, 0.02194443, 0.13183522, -0.16755345], + [0.09322704, -0.18807998, -0.06984743, 0.15454148, 0.01015892], + [-0.04743218, -0.12545264, 0.35787603, -0.1735842, -0.01140684]], + [[-0.21854429, -0.00674347, 0.05053139, 0.02567403, 0.14908233], + [0.09731252, -0.02596174, 0.03463032, 0.14460044, -0.2505815], + [0.1478814, -0.3902862, 0.02360253, 0.13103928, 0.087763], + [0.04834083, 0.13455458, 0.05632052, -0.3109298, 0.07171366]]]]).astype(np.float32) y = Net()(Tensor(x)) assert np.allclose(y.asnumpy(), expect_x) diff --git a/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py b/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py index 090a06ba19..a4afc081b6 100644 --- a/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py +++ b/tests/st/ops/gpu/test_sparse_softmax_cross_entropy_with_logits_op.py @@ -20,8 +20,9 @@ from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell): - def __init__( self): + def __init__(self): super(NetSparseSoftmaxCrossEntropyWithLogits, self).__init__() self.loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) self.dlogits = nn.SoftmaxCrossEntropyWithLogits(is_grad=True, sparse=True) @@ -34,10 +35,10 @@ class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_sparse_softmax_cross_entropy_with_logits(): - logits = Tensor(np.array([[1,1,10], - [1,10,1], - [10,1,1]]).astype(np.float32)) - labels = Tensor(np.array([2,1,0]).astype(np.int32)) + logits = Tensor(np.array([[1, 1, 10], + [1, 10, 1], + [10, 1, 1]]).astype(np.float32)) + labels = Tensor(np.array([2, 1, 0]).astype(np.int32)) 
expect_loss = 0.0002467 expect_dlogits = np.array([[4.1126452e-05, 4.1126452e-05, -8.2234539e-05], [4.1126452e-05, -8.2234539e-05, 4.1126452e-05], diff --git a/tests/st/ops/gpu/test_sqrt_op.py b/tests/st/ops/gpu/test_sqrt_op.py index 7dd9b75747..ea892798cb 100644 --- a/tests/st/ops/gpu/test_sqrt_op.py +++ b/tests/st/ops/gpu/test_sqrt_op.py @@ -35,4 +35,3 @@ def test_sqrt(): output_ms = P.Rsqrt()(Tensor(x_np)) output_np = 1 / np.sqrt(x_np) assert np.allclose(output_ms.asnumpy(), output_np) - diff --git a/tests/st/ops/gpu/test_tanh_op.py b/tests/st/ops/gpu/test_tanh_op.py index 8f6a6eefa4..dc020b7d56 100644 --- a/tests/st/ops/gpu/test_tanh_op.py +++ b/tests/st/ops/gpu/test_tanh_op.py @@ -23,6 +23,7 @@ import mindspore.context as context context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + class TanhNet(nn.Cell): def __init__(self): super(TanhNet, self).__init__() @@ -48,25 +49,25 @@ class Grad(nn.Cell): @pytest.mark.env_onecard def test_Tanh(): x_np = np.array( - [[ 0.28522366, 0.38033979, 1.54657853, -0.98530175, -0.54365635, 0.12652203, -1.33449938, -0.27737698], - [ 2.06282293, 0.84635078, 0.16628414, -0.91823183, -0.72023044, -0.09147043, -0.04166984, -1.5664763 ], - [-0.17157249, 0.44260951, -0.6683391, 1.13142613, 1.5536937, -0.32799768, -0.20016545, 0.06773927]], - dtype= np.float32) + [[0.28522366, 0.38033979, 1.54657853, -0.98530175, -0.54365635, 0.12652203, -1.33449938, -0.27737698], + [2.06282293, 0.84635078, 0.16628414, -0.91823183, -0.72023044, -0.09147043, -0.04166984, -1.5664763], + [-0.17157249, 0.44260951, -0.6683391, 1.13142613, 1.5536937, -0.32799768, -0.20016545, 0.06773927]], + dtype=np.float32) dy_np = np.array( - [[ 0.44969849, -0.187879, -0.64300827, 1.36638774, 0.89930276, -0.23835229, -0.67771854, -1.88984999], - [ 2.00418801, 2.33336475, 0.00241747, 1.31558685, 0.06768817, -2.23008804, -0.26818366, -1.26873401], - [ 1.83694105, 0.5339005, 0.51117424, 0.49202378, -0.83297819, -0.71001219, 0.18913512, 0.65580389]], - dtype= np.float32) + [[0.44969849, -0.187879, -0.64300827, 1.36638774, 0.89930276, -0.23835229, -0.67771854, -1.88984999], + [2.00418801, 2.33336475, 0.00241747, 1.31558685, 0.06768817, -2.23008804, -0.26818366, -1.26873401], + [1.83694105, 0.5339005, 0.51117424, 0.49202378, -0.83297819, -0.71001219, 0.18913512, 0.65580389]], + dtype=np.float32) x_ms = Tensor(x_np) dy_ms = Tensor(dy_np) - + net = TanhNet() grad = Grad(net) output = grad(x_ms, dy_ms) - expect = [[ 0.41501077, -0.16312202, -0.10675912, 0.58678646, 0.67828224, -0.23457714, -0.1643468 , -1.75159405], - [ 0.12541081, 1.2251587 , 0.00235184, 0.62396731, 0.04191568, -2.21153283, -0.26771853, -0.20311764], - [ 1.78391056, 0.44159236, 0.33690308, 0.16800483, -0.13651318, -0.63878956, 0.18175511, 0.65280384]] + expect = [[0.41501077, -0.16312202, -0.10675912, 0.58678646, 0.67828224, -0.23457714, -0.1643468, -1.75159405], + [0.12541081, 1.2251587, 0.00235184, 0.62396731, 0.04191568, -2.21153283, -0.26771853, -0.20311764], + [1.78391056, 0.44159236, 0.33690308, 0.16800483, -0.13651318, -0.63878956, 0.18175511, 0.65280384]] assert np.allclose(output[0].asnumpy(), expect) diff --git a/tests/st/ops/gpu/test_tensoradd.py b/tests/st/ops/gpu/test_tensoradd.py index 23c23f12d2..7b73f512dc 100644 --- a/tests/st/ops/gpu/test_tensoradd.py +++ b/tests/st/ops/gpu/test_tensoradd.py @@ -25,6 +25,7 @@ from mindspore.common.parameter import Parameter context.set_context(device_target='GPU') + class TensroAdd(nn.Cell): def __init__(self): super(TensroAdd, self).__init__() @@ -57,6 +58,7 @@ class 
TensroAdd(nn.Cell): self.add(self.x, self.y), self.add(self.x1, self.y1), self.add(self.x2, self.y2), self.add(self.x3, self.y3)) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard diff --git a/tests/st/ops/gpu/test_transpose_op.py b/tests/st/ops/gpu/test_transpose_op.py index 61f96fa78c..af94dfffcb 100644 --- a/tests/st/ops/gpu/test_transpose_op.py +++ b/tests/st/ops/gpu/test_transpose_op.py @@ -25,6 +25,7 @@ import mindspore.context as context context.set_context(device_target='GPU') + class Transpose(nn.Cell): def __init__(self): super(Transpose, self).__init__() diff --git a/tests/st/ops/gpu/test_unsorted_segment_sum.py b/tests/st/ops/gpu/test_unsorted_segment_sum.py index 1a4278d6cc..49e0099f6a 100644 --- a/tests/st/ops/gpu/test_unsorted_segment_sum.py +++ b/tests/st/ops/gpu/test_unsorted_segment_sum.py @@ -26,6 +26,7 @@ from mindspore.common import dtype as mstype context.set_context(device_target='GPU') + class UnsortedSegmentSumNet(nn.Cell): def __init__(self, num_segments): super(UnsortedSegmentSumNet, self).__init__() @@ -35,38 +36,38 @@ class UnsortedSegmentSumNet(nn.Cell): def construct(self, data, ids): return self.unsorted_segment_sum(data, ids, self.num_segments) + @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_1D(): - input_x = Tensor([1, 2, 3, 4], mstype.float32) - segment_ids = Tensor([0, 0, 1, 2], mstype.int32) - num_segments = 4 + input_x = Tensor([1, 2, 3, 4], mstype.float32) + segment_ids = Tensor([0, 0, 1, 2], mstype.int32) + num_segments = 4 - net = UnsortedSegmentSumNet(num_segments) - output = net(input_x, segment_ids) - expect = [3, 3, 4, 0] - assert (output.asnumpy() == expect).all() + net = UnsortedSegmentSumNet(num_segments) + output = net(input_x, segment_ids) + expect = [3, 3, 4, 0] + assert (output.asnumpy() == expect).all() @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_2D(): - input_x = Tensor([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]], mstype.float32) - segment_ids = Tensor([2, 1, 1], mstype.int32) - num_segments = 4 - - net = UnsortedSegmentSumNet(num_segments) - output = net(input_x, segment_ids) - expect = [[ 0, 0, 0, 0], - [14, 16, 18, 20], - [ 1, 2, 3, 4], - [ 0, 0, 0, 0]] - assert (output.asnumpy() == expect).all() + input_x = Tensor([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], mstype.float32) + segment_ids = Tensor([2, 1, 1], mstype.int32) + num_segments = 4 + net = UnsortedSegmentSumNet(num_segments) + output = net(input_x, segment_ids) + expect = [[0, 0, 0, 0], + [14, 16, 18, 20], + [1, 2, 3, 4], + [0, 0, 0, 0]] + assert (output.asnumpy() == expect).all() @pytest.mark.level0 @@ -79,11 +80,11 @@ def test_3D(): net = UnsortedSegmentSumNet(num_segments) output = net(input_x, segment_ids) - expect = [[[ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.]], + expect = [[[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], [[45., 47., 49.], [51., 53., 55.], @@ -91,21 +92,21 @@ def test_3D(): [63., 65., 67.], [69., 71., 73.]], - [[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.], - [ 9., 10., 11.], + [[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.], + [9., 10., 11.], [12., 13., 14.]], - [[ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.]], - - [[ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.], - [ 0., 0., 0.]]] + [[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], + + [[0., 0., 0.], + [0., 0., 
0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]] assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/gpu/test_zeroslike_op.py b/tests/st/ops/gpu/test_zeroslike_op.py index 46a031249a..50cfb0b424 100644 --- a/tests/st/ops/gpu/test_zeroslike_op.py +++ b/tests/st/ops/gpu/test_zeroslike_op.py @@ -24,6 +24,8 @@ from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") + + class NetZerosLike(nn.Cell): def __init__(self): super(NetZerosLike, self).__init__() diff --git a/tests/st/ops/test_rmsprop.py b/tests/st/ops/test_rmsprop.py index dcf65be2d9..8732da7546 100644 --- a/tests/st/ops/test_rmsprop.py +++ b/tests/st/ops/test_rmsprop.py @@ -28,15 +28,16 @@ class NetRMSProp(nn.Cell): super(NetRMSProp, self).__init__() self.use_centered = use_centered if use_centered: - self.rms_opt = P.ApplyCenteredRMSProp() + self.rms_opt = P.ApplyCenteredRMSProp() else: - self.rms_opt = P.ApplyRMSProp() + self.rms_opt = P.ApplyRMSProp() def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon): if self.use_centered: - return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon) + return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon) else: - return self.rms_opt(var, rms, mom, g, lr, decay, momentum, epsilon) + return self.rms_opt(var, rms, mom, g, lr, decay, momentum, epsilon) + def rmsprop_numpy(variable, gradients, mean_square, moment, learning_rate, decay, momentum, epsilon): @@ -44,11 +45,13 @@ def rmsprop_numpy(variable, gradients, mean_square, moment, moment = momentum * moment + learning_rate / np.sqrt(mean_square + epsilon) * gradients variable = variable - moment + def rmspropcented_numpy(variable, gradients, mean_gradients, mean_square, moment, learning_rate, decay, momentum, epsilon): mean_gradients = mean_gradients * decay + (1.0 - decay) * gradients mean_square = mean_square * decay + (1.0 - decay) * gradients * gradients - moment = momentum * moment + learning_rate / np.sqrt(mean_square -mean_gradients * mean_gradients + epsilon) * gradients + moment = momentum * moment + learning_rate / np.sqrt( + mean_square - mean_gradients * mean_gradients + epsilon) * gradients variable = variable - moment @@ -78,7 +81,7 @@ def test_rmsprop(): learning_rate, decay, momentum, epsilon) net = NetRMSProp(centered) - _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, + _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms, learning_rate, decay, momentum, epsilon) error = np.ones(shape=variable_np.shape) * 10e-6 @@ -128,7 +131,7 @@ def test_rmspropcenter(): learning_rate, decay, momentum, epsilon) net = NetRMSProp(centered) - _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms, + _ = net(variable_ms, gradients_ms, mean_gradients_ms, mean_square_ms, moment_ms, learning_rate, decay, momentum, epsilon) error = np.ones(shape=variable_np.shape) * 10e-6 @@ -149,4 +152,4 @@ def test_rmspropcenter(): error = np.ones(shape=moment_np.shape) * 10e-6 diff = moment_ms.asnumpy() - moment_np - assert np.all(diff < error) \ No newline at end of file + assert np.all(diff < error) diff --git a/tests/st/pynative/test_ascend_lenet.py b/tests/st/pynative/test_ascend_lenet.py index 5a84aaf930..4c1b854144 100644 --- a/tests/st/pynative/test_ascend_lenet.py +++ b/tests/st/pynative/test_ascend_lenet.py @@ -158,4 +158,4 @@ def test_ascend_pynative_lenet(): total_time = total_time + cost_time 
print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time) - assert(loss_output.asnumpy() < 0.1) + assert (loss_output.asnumpy() < 0.1) diff --git a/tests/st/summary/test_davinci_summary.py b/tests/st/summary/test_davinci_summary.py index 7b55b9da56..2ee9e35aea 100644 --- a/tests/st/summary/test_davinci_summary.py +++ b/tests/st/summary/test_davinci_summary.py @@ -99,4 +99,4 @@ def test_net(): input_np = np.ones([32, 2048, 14, 14]).astype(np.float32) * 0.01 label_np = np.ones([32]).astype(np.int32) me_train_tensor(net, input_np, label_np) - #me_infer_tensor(net, input_np) + # me_infer_tensor(net, input_np) diff --git a/tests/st/summary/test_gpu_summary.py b/tests/st/summary/test_gpu_summary.py index e4885ed3d5..49ed76eb53 100644 --- a/tests/st/summary/test_gpu_summary.py +++ b/tests/st/summary/test_gpu_summary.py @@ -24,10 +24,8 @@ from mindspore.common.tensor import Tensor from mindspore.ops import operations as P from mindspore.train.summary.summary_record import SummaryRecord - context.set_context(mode=context.GRAPH_MODE, device_target="GPU") - CUR_DIR = os.getcwd() SUMMARY_DIR_ME = CUR_DIR + "/test_me_summary_event_file/" SUMMARY_DIR_ME_TEMP = CUR_DIR + "/test_me_temp_summary_event_file/" @@ -92,7 +90,6 @@ def train_summary_record_scalar_for_1(test_writer, steps, fwd_x, fwd_y): def me_scalar_summary(steps, tag=None, value=None): with SummaryRecord(SUMMARY_DIR_ME_TEMP) as test_writer: - x = Tensor(np.array([1.1]).astype(np.float32)) y = Tensor(np.array([1.2]).astype(np.float32)) diff --git a/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py b/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py index 7fa9c76871..ab2a39835d 100644 --- a/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py +++ b/tests/ut/cpp/python_input/gtest_input/ir/clone_test.py @@ -14,48 +14,64 @@ # ============================================================================ """ Test for GraphCloner """ from mindspore.ops import Primitive + scala_add = Primitive('scalar_add') scalar_mul = Primitive('scalar_mul') + def test_clone_simple(): def f(x, y): a = scalar_mul(x, x) b = scalar_mul(y, y) c = scala_add(a, b) return c + return f + def test_clone_closure(x, y): def j(z): a = x + y b = a + z return b + c = j(3) return c + def test_clone_scoping(): """ test_clone_scoping """ print("run python test_clone_scoping") + def f(x, y): def h(z): # No dependency on f, so not nested and not cloned return z * z + def g(z): def gg(): return z + z + # Depends on f, therefore cloned return x + y + gg() + def i(q): # Depends on f, therefore cloned return g(1) * q + return g(1) + h(x) + i(y) + return f + def test_clone_total(): print("run python test_clone_total") + def clone_total(y): return clone_total_sub(y) + 3 + return clone_total + def clone_total_sub(x): return x * x diff --git a/tests/ut/cpp/python_input/gtest_input/ir/manager_test.py b/tests/ut/cpp/python_input/gtest_input/ir/manager_test.py index 8aad56050c..dcb3827601 100644 --- a/tests/ut/cpp/python_input/gtest_input/ir/manager_test.py +++ b/tests/ut/cpp/python_input/gtest_input/ir/manager_test.py @@ -14,33 +14,45 @@ # ============================================================================ """ Test for manager """ + def ir_get_fn(x, y): return x - y + def test_flat(x): return x + def test_nested(x): def g(): return x + return g + def test_fake_nested(x): return x + def test_recurse(x): def g(): return g() + x + return g + def test_calls(x): a = x + x + def h(): return a + def g(): return h() + return g() + # pylint: 
disable=unused-argument def test_unused_param(x, y): return x * x @@ -49,10 +61,13 @@ def test_unused_param(x, y): def test_cannot_replace_return(x): return x * x + # custom test function def test_custom(x, y, z): def g(x1, y1): def h(x2): return x2 + y1 + z + return h(x1) + return g(x, y) diff --git a/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py b/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py index 73ec6ef4e7..8014d04b9b 100644 --- a/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py +++ b/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py @@ -17,6 +17,7 @@ from mindspore.ops import operations as P add = P.TensorAdd() reshape = P.Reshape() + def test_shape_add(x1, x2, y1, y2, z1, z2): sum1 = add(x1, x2) sum2 = add(y1, y2) diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py index 882c5f0eff..d945d7286c 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/ad/ad_test.py @@ -19,72 +19,93 @@ import mindspore as ms from mindspore.common.tensor import Tensor from mindspore.ops import Primitive from mindspore.model_zoo.resnet import resnet50 + scala_add = Primitive('scalar_add') + @dataclass class Point: x: float y: float + def abs(self): return (self.x ** 2 + self.y ** 2) ** 0.5 + def scalar_add(x, y): """Implement `scalar_add`.""" return x + y + def scalar_mul(x, y): """Implement `scalar_mul`.""" return x * y + # Test:common function def test_null(x, y): return scala_add(10.0, 28.0 / 43.0) + def test_grad_add(x, y): return scala_add(x, y) + def test_grad_expr(x, y): - return x**3.0 * y**4.0 + return x ** 3.0 * y ** 4.0 + def test_constant(x): return 18.0 * x + def test_dup_args_in_call(x): """The naive gradient update rule fails when a function's arguments contain the same variable more than once.""" return x * x + def test_quadruple_args_in_call(x): """Test that duplicated arguments still cause no problem even if there are four of them.""" + def g(a, b, c, d): return a * b * c * d + return g(x, x, x, x) + def test_tuples(x, y): tup = scala_add(x, y), x * y z = scala_add(tup[0], tup[1]) return z + def test_dataclass(x, y): pt = Point(x, y) return pt.x * pt.y + def test_dataclass_2(x, y): pt = Point(x, y) return pt.abs() + def test_hof(a, b): """Test higher order functions.""" + def f(g, x): return g(x) * g(scala_add(x, 10.0)) + def g(x): return x * b + return scala_add(f(g, a), f(g, b)) def test_hof_tup(a, b): """Test higher order functions.""" + def f(gh, x, y): g, h = gh return scalar_mul(g(x, y), h(x, y)) @@ -94,24 +115,30 @@ def test_hof_tup(a, b): def test_simple_closure(a, b): """Test some trivial closures.""" + def f(): return a + 1.0 def g(): return b + 2.0 + return f() * g() + def test_closure(a): """This is the closure test in the paper.""" - def x1(b): + def x1(b): def x4(c): return c * b + return x4 + x2 = x1(a) x3 = x2(1.0) return x3 + def test_if(a, b): # This is max, but what this is really testing is the most basic # if statement, so I prefer to name the test 'test_if' @@ -119,24 +146,29 @@ def test_if(a, b): return a return b + def test_if2(a, b): if a > b: return a * a return b + b + def test_fact(x): def fact(n): if n <= 1: return 1 return n * fact(n - 1) + return fact(x) + def test_while(x): rval = x while rval < 100: rval = rval * rval return rval + def test_while_2(x, y, z): rval = 0 # Cannot compare to 0 or finite diff is unstable @@ -145,6 
+177,7 @@ def test_while_2(x, y, z): x = x - z return rval + def test_pow10(x): v = x j = 0 @@ -156,6 +189,7 @@ def test_pow10(x): j = j + 1 return v + def test_nested_closure(x): a = x * x b = x + 5 @@ -166,14 +200,18 @@ def test_nested_closure(x): def h(): return a * b + return g if x < 0 else h + return f()() + def test_functions_in_tuples(x, y): tup = scalar_add, scalar_mul f, g = tup return f(x, y) + g(x, y) + def test_closures_in_tuples(x, y): def f(): return x * y @@ -185,17 +223,20 @@ def test_closures_in_tuples(x, y): ff, gg = tup return scala_add(ff(), gg()) + # tensor test def test_tensor_add(x, y): t1 = Tensor(np.ones(x)) t2 = Tensor(np.zeros(y), ms.float32) return t1 + t2 + def test_tensor_set_type(x): t = Tensor(x) t.set_dtype(ms.float32) return t + def test_tensor_mul(x, y): x = Tensor(x) y = Tensor(y) @@ -203,36 +244,46 @@ def test_tensor_mul(x, y): return z + def test_tensor_sub(x, y): x = Tensor(x) y = Tensor(y) z = x - y return z + relu = Primitive('relu') + + # Extension test def test_ops_fn(x): foo = relu(x) return foo + def test_clone_simple(x, y): a = x * x b = y * y c = a + b return c + def test_more_closure(a, b): """Test some trivial closures.""" z = 1 + def f(): return a + z def g(): return b + 2.0 + return f() * g() + def test_more_hof(a, b): """Test higher order functions.""" + def f(g, h, x): return g(x) * h(x) * g(x + 10.0) @@ -244,6 +295,7 @@ def test_more_hof(a, b): return scala_add(f(g, h, a), f(g, h, b)) + def test_constant_output(x, y): return 1 diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/cconv_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/cconv_test.py index c3be324406..df67d23b9a 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/cconv_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/cconv_test.py @@ -19,49 +19,63 @@ @Desc : parse python function for ut of cconv """ + class FnDict: def __init__(self): self.fnDict = {} + def __call__(self, fn): self.fnDict[fn.__name__] = fn + def __getitem__(self, name): return self.fnDict[name] + # pylint: disable=unused-variable def get_test_cconv_fn(tag): """ get_test_cconv_fn """ fns = FnDict() + @fns def test_straight(x, y): return x * x + y * y + @fns def test_simple_closure(x): def g(): return x + return g + @fns def test_max(x, y): if x > y: return x return y + @fns def test_deep_nesting(x): def f(y): def g(z): def h(): return y + z + return h() + return g(x) + a = f(x + 1) b = f(x - 3) return a() + b() + @fns def test_return_in_double_while(x): while x > 0: while x > 0: - x = x -1 + x = x - 1 return x return -1 + @fns def test_pow10(x): v = x @@ -73,6 +87,7 @@ def get_test_cconv_fn(tag): i = i + 1 j = j + 1 return v + @fns def test_closure_as_simple_fv(x): def f(): @@ -80,62 +95,89 @@ def get_test_cconv_fn(tag): def g(): return f() + return g() + @fns def test_closure_as_fv(x, y): def ax(): return x + def bx(): return ax() + def cx(): return bx() + def gx(): return cx() + def ay(): return y + def by(): return ay() + def cy(): return by() + def gy(): return cy() + def g(): return gx() + gy() + return g() + @fns def test_closure_as_double_fv(x): def a(): return x + def b(y): def e(): return y + return e() + a() + def g(y): def c(): return b(y) + return c() + return g(1) + @fns def test_closure_lift_same_param(x): def a(): return x + def b(): return a() + def c(): return x + def d(): return c() + def f(y): def e(): return y + return e() + d() + b() + def g(): return f(1) + return g() + @fns def test_closure_as_loop(x, lower_bound): def fv_func(y): return x * y + 
ret = 0 i = lower_bound while i < 100: @@ -143,21 +185,30 @@ def get_test_cconv_fn(tag): i += 1 ret += ele return ret + @fns def test_closure_lift_cnode(x): def a(i, j): return i, j + def f(): def e(): return x + m = a(x, e()) n = a(m, m) + def b(): return m, n + def d(): return n, m + return b(), d() + def g(): return f() + return g() + return fns[tag] diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/clean_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/clean_test.py index ab6805ef81..ff9b5880d4 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/clean_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/clean_test.py @@ -21,14 +21,16 @@ from dataclasses import dataclass -#Test_Erase_class +# Test_Erase_class @dataclass class Point: x: float y: float + def product(self): return self.x * self.y + def test_erase_class_fn(p_in): p = Point(p_in) return p.x * p.y diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py index 836a586ca7..d79cd85aeb 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py @@ -30,140 +30,175 @@ scalar_mul = Primitive('scalar_mul') tuple_getitem = Primitive('tuple_getitem') switch = Primitive('switch') + def test_sexp_conversion(): """ test_sexp_conversion """ return scalar_mul(10, scalar_add(5, 4)) + class FnDict: def __init__(self): self.fnDict = {} + def __call__(self, fn): self.fnDict[fn.__name__] = fn + def __getitem__(self, name): return self.fnDict[name] + def test_add_zero(tag): """ test_add_zero """ fns = FnDict() + @fns def before_1(x): return scalar_add(x, 0) + @fns def before_2(x): return scalar_add(scalar_add(x, 0), 0) + @fns def after(x): return x return fns[tag] + def test_elimR(tag): """ test_elimR """ R = Primitive('R') fns = FnDict() + @fns def before_1(x): return R(x) + @fns def after(x): return x return fns[tag] + def test_idempotent(tag): """ test_idempotent """ P = Primitive('P') R = Primitive('R') fns = FnDict() + @fns def before_1(x): return P(P(x)) + @fns def before_2(x): return P(P(P(P(P(x))))) + @fns def after(x): return P(x) return fns[tag] + def test_constant_variable(tag): """ test_constant_variable """ P = Primitive('P') Q = Primitive('Q') fns = FnDict() + @fns def before_1(x): return Q(15) + Q(x) + @fns def after(x): return P(15) + Q(x) return fns[tag] + def cost(x): """ cost """ return x * 10 + J = Primitive('J') + def test_expendJ(x): """ test_expendJ """ return J(cost)(x) + def test_elim_jinv_j(tag): """ test_elim_jinv_j """ J = Primitive('J') Jinv = Primitive('Jinv') fns = FnDict() + @fns def before_1(x): return J(Jinv(x)) + @fns def before_2(x): return Jinv(J(x)) + @fns def after(x): return x return fns[tag] + def test_simplify_always_true_false(tag): """ test_simplify_always_true_false """ fns = FnDict() + @fns def before_1(x, y): return switch(True, x, y) + @fns def before_2(x, y): return switch(False, y, x) + @fns def after(x, y): return x return fns[tag] + def test_inline(tag): """ test_inline """ fns = FnDict() + @fns def before(x, y): def fn1(x1): return x1 + return fn1(x) + @fns def after(x, y): return x return fns[tag] + def test_inline_successively(tag): """ test_inline_successively """ fns = FnDict() + def one(x): return x + 1 @@ -183,15 +218,18 @@ def test_inline_successively(tag): return fns[tag] + def test_inline_closure(tag): """ test_inline_closure """ fns = FnDict() + @fns def before(x, y, z): c = z * z def f(x): 
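            # c is free in f; inlining the call f(x * y) below is only sound if
            # the captured value of c survives the rewrite, which is what this
            # case exercises.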
return x + c + return f(x * y) @fns @@ -201,20 +239,26 @@ def test_inline_closure(tag): return fns[tag] + def test_inline_deep_closure(tag): """ test_inline_deep_closure """ fns = FnDict() + def f(x): w = x * x def g(): def h(): return w + return h() + return g + @fns def before(x, y): return f(x)() - f(y)() + @fns def after(x, y): w1 = x * x @@ -223,30 +267,38 @@ def test_inline_deep_closure(tag): return fns[tag] + def test_inline_new_closure(tag): """ test_inline_new_closure """ fns = FnDict() + def q(x): return x * x def f(x): def g(): return q(x) + return g + @fns def before(x): return f(x) + @fns def after(x): def g(): return x * x + return g return fns[tag] + def test_inline_recursive_direct(tag): """ test_inline_recursive_direct """ fns = FnDict() + @fns def before1(x): return before1(x - 1) @@ -258,13 +310,16 @@ def test_inline_recursive_direct(tag): def helper2(x): return before1(x - 1) + return helper1(x) return fns[tag] + def test_inline_recursive(tag): """ test_inline_recursive """ fns = FnDict() + @fns def before(x): if x <= 0: @@ -273,27 +328,33 @@ def test_inline_recursive(tag): return fns[tag] + def test_inline_while(tag): """ test_inline_while """ fns = FnDict() + @fns def before(x): rval = x while rval < 100: rval = rval * rval return rval + return fns[tag] + def test_cse(tag): """ test_cse """ fns = FnDict() scalar_div = Primitive('scalar_div') + @fns def test_f1(x, y): a = scalar_add(x, y) b = scalar_add(x, y) c = scalar_mul(a, b) return c + @fns def test_f2(x, y): a = scalar_add(x, y) @@ -301,42 +362,54 @@ def test_cse(tag): c = scalar_add(scalar_mul(a, y), scalar_div(scalar_add(x, y), x)) d = scalar_add(b, c) return d + return fns[tag] + def test_arithmetic(tag): """ test_arithmetic """ fns = FnDict() identity = Primitive('identity') + @fns def multiply_by_zero_l(x): return scalar_mul(x, 0) + @fns def multiply_by_zero_r(x): return scalar_mul(0, x) + @fns def after_0(x): return 0 + @fns def multiply_by_one_l(x): return scalar_mul(x, 1) + @fns def multiply_by_one_r(x): return scalar_mul(1, x) + @fns def add_zero_l(x): return scalar_add(x, 0) + @fns def add_zero_r(x): return scalar_add(0, x) + @fns def elim_identity(x): return identity(x) + @fns def after(x): return x return fns[tag] + def test_elim_cast_same_dtype(tag): """ test_elim_cast_same_dtype """ fns = FnDict() @@ -345,6 +418,7 @@ def test_elim_cast_same_dtype(tag): @fns def fp32_cast_fp32(x, y): return cast(x, y) + @fns def after(x, y): return x @@ -357,30 +431,36 @@ def elim_reshape_same_shape(tag): fns = FnDict() reshape = P.Reshape() shape = (2, 3) + @fns def reshape_to_2_3(x): return reshape(x, shape) + @fns def after(x): return x return fns[tag] + def elim_two_reshape(tag): """ elim_two_reshape """ fns = FnDict() reshape = P.Reshape() shape = (2, 3) shape_2 = (3, 2) + @fns def before(x): return reshape(reshape(x, shape_2), shape) + @fns def after(x): return reshape(x, shape) return fns[tag] + def elim_two_cast(tag): """ elim_two_cast """ fns = FnDict() @@ -389,12 +469,14 @@ def elim_two_cast(tag): @fns def before(x, a, b): return cast(cast(x, a), b) + @fns def after(x, a, b): return cast(x, b) return fns[tag] + def test_elim_transpose(tag): """ test_elim_transpose """ fns = FnDict() @@ -404,12 +486,14 @@ def test_elim_transpose(tag): @fns def before(x): return transpose(x, perm) + @fns def after(x): return x return fns[tag] + def test_elim_tile_multiply_one(tag): """ test_elim_tile_multiply_one """ fns = FnDict() @@ -419,12 +503,14 @@ def test_elim_tile_multiply_one(tag): @fns def before(x): return tile(x, 
all_one) + @fns def after(x): return x return fns[tag] + def test_elim_reduce_mean_shape_one(tag): """ test_elim_reduce_mean_shape_one """ fns = FnDict() @@ -433,12 +519,14 @@ def test_elim_reduce_mean_shape_one(tag): @fns def before(x, y): return reduce_mean(x, 0) + @fns def after(x, y): return x return fns[tag] + def test_elim_all_shape_one(tag): """ test_elim_all_shape_one """ fns = FnDict() @@ -447,12 +535,14 @@ def test_elim_all_shape_one(tag): @fns def before(x, y): return all_(x, 0) + @fns def after(x, y): return x return fns[tag] + def test_elim_sum_shape_one(tag): """ test_elim_sum_shape_one """ fns = FnDict() @@ -461,12 +551,14 @@ def test_elim_sum_shape_one(tag): @fns def before(x, y): return sum_(x, 0) + @fns def after(x, y): return x return fns[tag] + def test_tuple_getitem(tag): """ test_tuple_getitem """ fns = FnDict() @@ -479,15 +571,18 @@ def test_tuple_getitem(tag): @fns def make_get_1(x, y): return tuple_getitem(make_tuple(x, y), 1) + @fns def after_0(x, y): return x + @fns def after_1(x, y): return y return fns[tag] + def test_tuple_setitem(tag): """ test_tuple_setitem """ fns = FnDict() @@ -501,15 +596,18 @@ def test_tuple_setitem(tag): @fns def before_1(x, y, z): return tuple_setitem(make_tuple(x, y), 1, z) + @fns def after_0(x, y, z): return make_tuple(z, y) + @fns def after_1(x, y, z): return make_tuple(x, z) return fns[tag] + def test_tuple_get_set_item(tag): """ test_tuple_get_set_item """ fns = FnDict() @@ -533,6 +631,7 @@ def test_tuple_get_set_item(tag): return fns[tag] + def test_partial(tag): """ test_partial """ fns = FnDict() @@ -551,6 +650,7 @@ def test_partial(tag): return fns[tag] + def test_replace_applicator(tag): """ test_replace_applicator """ fns = FnDict() @@ -583,6 +683,7 @@ def test_replace_applicator(tag): return fns[tag] + def test_specialize_on_graph_arguments(tag): """ test_specialize_on_graph_arguments """ fns = FnDict() @@ -600,9 +701,12 @@ def test_specialize_on_graph_arguments(tag): def after(x, y): def helper(x, y): return scalar_add(f1(x), f2(y)) + return helper(x, y) + return fns[tag] + def test_incorporate_getitem(tag): """ test_incorporate_getitem """ fns = FnDict() @@ -613,32 +717,38 @@ def test_incorporate_getitem(tag): def before1(x, y): def fn(x, y): return f1(x, y), f2(x, y) + return tuple_getitem(fn(x, y), 0) @fns def after1(x, y): def fn(x, y): return f1(x, y) + return fn(x, y) @fns def before2(x, y): def fn(x, y): return x + return tuple_getitem(fn(x, y), 0) @fns def after2(x, y): def fn(x, y): return tuple_getitem(x, 0) + return fn(x, y) return fns[tag] + def test_incorporate_getitem_through_switch(tag): """ test_incorporate_getitem_through_switch """ fns = FnDict() scalar_gt = Primitive('scalar_gt') + @fns def before(x, y): def f1(x, y): @@ -663,6 +773,7 @@ def test_incorporate_getitem_through_switch(tag): return fns[tag] + def test_incorporate_call(tag): """ test_incorporate_call """ fns = FnDict() @@ -673,7 +784,9 @@ def test_incorporate_call(tag): def fn(q): def subf(z): return f1(q, z) + return subf + return fn(x)(y) @fns @@ -681,11 +794,14 @@ def test_incorporate_call(tag): def fn(q, y): def subf(z): return f1(q, z) + return subf(y) + return fn(x, y) return fns[tag] + def test_incorporate_call_through_switch(tag): """ test_incorporate_call_through_switch """ fns = FnDict() @@ -693,14 +809,18 @@ def test_incorporate_call_through_switch(tag): f2 = Primitive('f2') scalar_gt = Primitive('scalar_gt') identity = Primitive('identity') + @fns def before(x, y, z): def f1g(): return f1 + def f2g(): return f2 + def fn(): return 
switch(scalar_gt(x, 0), f1g, f2g)() + return fn()(y, z) @fns @@ -708,9 +828,12 @@ def test_incorporate_call_through_switch(tag): def fn(y, z): def tb(y, z): return f1(y, z) + def fb(y, z): return f2(y, z) + return switch(scalar_gt(x, 0), tb, fb)(y, z) + return fn(y, z) return fns[tag] @@ -720,6 +843,7 @@ def test_float_tuple_getitem_through_switch(tag): """ test_float_tuple_getitem_through_switch """ fns = FnDict() scalar_gt = Primitive('scalar_gt') + @fns def before(x, y): return tuple_getitem(switch(scalar_gt(x, 0), x, y), 0) @@ -730,11 +854,13 @@ def test_float_tuple_getitem_through_switch(tag): return fns[tag] + def test_merge_addn(tag): """ test_merge_addn """ fns = FnDict() addn = P.AddN() AddN = P.AddN + @fns def before(x, y, z, a): return addn((addn((a, x, y)), z)) @@ -745,12 +871,14 @@ def test_merge_addn(tag): return fns[tag] + def test_addn_zero(tag): """ test_addn_zero """ fns = FnDict() addn = P.AddN() AddN = P.AddN zero_tensor = Primitive('zeros_like_tensor') + @fns def before_1(x, y, z, a): return addn((a, zero_tensor(x), zero_tensor(y), z)) @@ -773,6 +901,7 @@ def test_addn_zero(tag): return fns[tag] + def test_convert_switch_ops(tag): fns = FnDict() ge_switch = Primitive('GeSwitch') @@ -781,6 +910,7 @@ def test_convert_switch_ops(tag): neg = Primitive('Neg') tuple_getitem = Primitive('tuple_getitem') make_tuple = Primitive('make_tuple') + @fns def before(cond, x, y): if cond: @@ -788,6 +918,7 @@ def test_convert_switch_ops(tag): else: z = neg(y) return z + @fns def after(cond, x, y): sw1 = ge_switch(x, cond) @@ -802,6 +933,7 @@ def test_convert_switch_ops(tag): merge_res = merge(tup) res = tuple_getitem(merge_res, 0) return res + return fns[tag] @@ -809,6 +941,7 @@ def test_minmax_grad(tag): """ test_minmax_grad """ fns = FnDict() min_grad = G.MinimumGrad() + @fns def before_11(x, y, dout): return tuple_getitem(min_grad(x, y, dout), 0) @@ -823,6 +956,7 @@ def test_minmax_grad(tag): return tuple_getitem(a, 0), tuple_getitem(a, 1) max_grad = G.MaximumGrad() + @fns def before_31(x, y, dout): return tuple_getitem(max_grad(x, y, dout), 0) @@ -879,6 +1013,7 @@ def test_reducesum_one(tag): return fns[tag] + def test_print_tuple_wrapper(tag): fns = FnDict() print_ = Primitive('Print') @@ -906,6 +1041,7 @@ def test_print_tuple_wrapper(tag): return fns[tag] + def test_constant_duplicate_mul(tag): fns = FnDict() Mul = Primitive('Mul'); diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py index 3c3674747a..77fd75dce1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/infer_test.py @@ -114,6 +114,7 @@ def test_graph_infer_defaults(): x = 100 y = 20 return func_call(x, y) + return test_call_variable diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/primitive_test.py b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/primitive_test.py index 9f583d589a..d34b1d4971 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/infer/primitive_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/infer/primitive_test.py @@ -17,6 +17,7 @@ from mindspore.common import dtype from mindspore.ops import prim_attr_register, PrimitiveWithInfer from mindspore.ops import operations as P + def get_add(a, b): return a + b @@ -26,15 +27,22 @@ def get_f(v): relu = nn.ReLU() + + def get_relu(x): return relu(x) + softmax_cross_entropy_with_logits = P.SoftmaxCrossEntropyWithLogits() + + def 
get_softmax_cross_entropy_with_logits(logits, labels): return softmax_cross_entropy_with_logits(logits, labels) + class TensorToScalar(PrimitiveWithInfer): """this is a test primitive for cases that has tensor input, but has only one scalar output""" + @prim_attr_register def __init__(self): """init""" @@ -49,7 +57,10 @@ class TensorToScalar(PrimitiveWithInfer): # pylint: disable=unused-argument return dtype.float64 + tensorToScalar = TensorToScalar() + + def get_tensor_to_scalar(logits, labels): return tensorToScalar(logits, labels) @@ -60,15 +71,21 @@ conv2d = P.Conv2D(64, pad=1, stride=2) + def get_conv2d(x, w): return conv2d(x, w) + conv2dNative = P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2) + def get_conv2d_native(x, w): return conv2dNative(x, w) + biasAdd = P.BiasAdd() + + def get_bias_add(x, b): return biasAdd(x, b) @@ -76,8 +93,10 @@ def get_bias_add(x, b): def test_conv2d(out_channel, kernel_size, pad, stride, dilation): conv = P.Conv2D(out_channel=out_channel, kernel_size=kernel_size, pad_mode="pad", pad=pad, stride=stride, dilation=dilation) + def get_conv(x, w): return conv(x, w) + return get_conv @@ -85,8 +104,10 @@ def test_dropout(): dropOutGenMask = P.DropoutGenMask() dropoutDoMask = P.DropoutDoMask() shape = P.Shape() + def get_dropout(x, prob): mask = dropOutGenMask(shape(x), prob) y = dropoutDoMask(x, mask, prob) return y + return get_dropout diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py index 93f33d07e1..a5f8245d71 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py @@ -26,7 +26,6 @@ from mindspore.common.api import _executor from mindspore.common.tensor import Tensor from mindspore.common.parameter import Parameter - log = logging.getLogger("test") log.setLevel(level=logging.ERROR) @@ -37,11 +36,12 @@ class ResNet(nn.Cell): self.weight = Parameter(tensor, name="weight") def construct(self, x, y): - return x + y*self.weight + return x + y * self.weight def get_params(self): return None + class SimpleNet(nn.Cell): def __init__(self, network, tensor, use_net=False): super(SimpleNet, self).__init__() diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_compile.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_compile.py index 9fd56f7a51..b1bc578789 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_compile.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_compile.py @@ -26,10 +26,10 @@ from mindspore.common.tensor import Tensor from mindspore.nn.optim import Momentum from mindspore.train.model import Model - log = logging.getLogger("test") log.setLevel(level=logging.ERROR) + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -43,8 +43,10 @@ class Net(nn.Cell): out = self.flatten(x) return out + loss = nn.MSELoss() + def test_build(): input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224])) input_label = Tensor(np.random.randint(0, 10, [1, 10])) diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_primitive.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_primitive.py index 028ee304d5..df09228efa 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_primitive.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_primitive.py @@ -30,11 +30,9 @@ from mindspore.ops import Primitive, 
prim_attr_register from mindspore.ops import functional as F from mindspore.train.model import Model - log = logging.getLogger("test") log.setLevel(level=logging.ERROR) - # use method1: create instance outside function relu_test = Primitive('relu_test') @@ -47,10 +45,10 @@ def test_ops_f1(x, y): # use method2: create instance outside function use an operator with parameters class Conv_test(Primitive): @prim_attr_register - def __init__(self, stride = 0, pad = 1): + def __init__(self, stride=0, pad=1): print('in conv_test init', self.stride) - def __call__(self, x = 0, y = 1, z= 2): + def __call__(self, x=0, y=1, z=2): pass @@ -70,7 +68,7 @@ class ResNet(nn.Cell): self.conv = Conv_test(3, 5) def construct(self, x, y, train="train"): - return x + y*self.weight + self.conv(x) + return x + y * self.weight + self.conv(x) def get_params(self): return None @@ -121,6 +119,7 @@ def test_primitive_obj_parameter(): model = SimpleNet_1() return model + # use method4: call primitive ops with parameters class SimpleNet_2(nn.Cell): def __init__(self): diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py index 39f6244877..bd8b11cd9e 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py @@ -31,11 +31,13 @@ from mindspore.train.model import Model def test_high_order_function(a): def f(g, x): return scalar_mul(g(x, x), g(x, x)) + return f(scalar_add, a) def test_hof_tup(a, b): """Test higher order functions.""" + def f(gh, x, y): g, h = gh return scalar_mul(g(x, y), h(x, y)) @@ -47,10 +49,12 @@ def scalar_mul(x, y): """Implement `scalar_mul`.""" return x * y + def scalar_add(x, y): """implement scalar_add""" return x + y + def test_while_2(x, y, z): rval = 0 # Cannot compare to 0 or finite diff is unstable @@ -59,6 +63,7 @@ def test_while_2(x, y, z): x = x - z return rval + def test_nested_closure(x): a = x * x b = x + 5 @@ -69,14 +74,18 @@ def test_nested_closure(x): def h(): return a * b + return g if x < 0 else h + return f()() + def test_functions_in_tuples(x, y): tup = scalar_add, scalar_mul f, g = tup return f(x, y) + g(x, y) + def test_closures_in_tuples(x, y): def f(): return x * y @@ -88,10 +97,12 @@ def test_closures_in_tuples(x, y): ff, gg = tup return ff() + gg() + @ms_function def add(x, y): return x + y + def test_tensor_add(): X = me.tensor() Y = me.tensor() @@ -103,12 +114,15 @@ def test_tensor_add(): print("test tensor add") return sum + def loss_func(x, y): return x - y + def optimizer(x): return x + def test_resetnet50_build(): X = me.tensor() Y = me.tensor() @@ -117,40 +131,50 @@ def test_resetnet50_build(): network = resnet50() model = Model(network=network, loss_fn=loss_func, optimizer=optimizer) + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.conv = nn.Conv2d(3, 64, 3, bias_init='zeros') + def construct(self, input): return self.conv(input) + class TestNet(nn.Cell): def __init__(self): super(TestNet, self).__init__() self.param = Parameter(Tensor([1, 3, 16, 50]), "param") + def construct(self, input): self.param = self.param + input return self.param + def test_compile_conv2d(): net = Net() input = Tensor(np.ones([1, 3, 16, 50]).astype(np.float32)) _executor.compile(net, input) + def test_none(x, y): def func(x, y): if y == None: return x return x + y + return func(x, y) + def test_get_attr(x): - a = F.scalar_mul(x ,x) + a = F.scalar_mul(x, x) 
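    # Resolving F.scalar_mul exercises attribute lookup on the imported
    # mindspore.ops.functional module while the function is parsed.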
return a + @ms_function def known(): return unknown() + def test_undefined_symbol(): known() diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_test.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_test.py index fd7a500da7..c0a41debc6 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_test.py @@ -24,10 +24,12 @@ from dataclasses import dataclass log = logging.getLogger("test") log.setLevel(level=logging.ERROR) + # Test:common function def test_f(x, y): return x - y + def test_if(x, y): if x: z = x + y @@ -35,9 +37,11 @@ def test_if(x, y): z = y * y return z + def test_ifexp(x, y): - z = (x + y) if x else y*y - return z + z = (x + y) if x else y * y + return z + def test_if_nested(x, y, t): if x: @@ -48,23 +52,26 @@ def test_if_nested(x, y, t): z = z * z else: if t: - z = t*t + z = t * t else: z = t + x return z + def test_while(x, y): z = x + y while z: z = x + x return z + def test_for(x, y): z = y for index in x: z = z + index return z + def test_compare_lt(x, y): z = 0 if x < y: @@ -73,6 +80,7 @@ def test_compare_lt(x, y): z = y return z + def test_compare_gt(x, y): z = 0 if x > y: @@ -81,6 +89,7 @@ def test_compare_gt(x, y): z = y return z + def test_compare_ge(x, y): z = 0 if x >= y: @@ -89,6 +98,7 @@ def test_compare_ge(x, y): z = y return z + def test_compare_le(x, y): z = 0 if x <= y: @@ -97,6 +107,7 @@ def test_compare_le(x, y): z = y return z + def test_compare_eq(x, y): z = 0 if x == y: @@ -105,6 +116,7 @@ def test_compare_eq(x, y): z = y return z + def test_compare_ne(x, y): z = 0 if x != y: @@ -113,13 +125,15 @@ def test_compare_ne(x, y): z = y return z + def test_boolop_two_and(x, y): - if x and y : + if x and y: t = x + y else: - t =0 + t = 0 return t + def test_boolop_three_and(x, y, z): if x and y and z: t = x + y @@ -127,13 +141,15 @@ def test_boolop_three_and(x, y, z): t = z return t + def test_boolop_two_or(x, y): - if x or y : + if x or y: t = x + y else: - t =0 + t = 0 return t + def test_boolop_three_or(x, y, z): if x or y or z: t = x + y @@ -141,6 +157,7 @@ def test_boolop_three_or(x, y, z): t = z return t + def test_boolop_mix_and_or(x, y, z): if x and y or z: t = x + y @@ -148,83 +165,105 @@ def test_boolop_mix_and_or(x, y, z): t = z return t + def test_lambda(x, y): - l = lambda x,y: x * y + l = lambda x, y: x * y t = l(x, y) return t + def test_funcdef(x, y): def mymax(a, b): if a > b: return a else: return b + t = mymax(x, y) return t + def test_tuple_fn(x, y): l = (1, 2, 3, 5, 7) l = l + l[y] return l + def test_list_fn(x, y): l = [1, 2, 3, 5, 7] l = l + l[y] return l + # Test:resolve function def get_resolve_fn(x, y): return test_f(x, y) + # Test:no return function def get_no_return_fn(x, y): x + y + def testDoNum(): return 1 + def testDoStr(): return "str" + def testDoNamedConstTrue(): return True + def testDoNamedConstFalse(): return False + def testDoNamedConstNone(): return None -#Test_Class_type + +# Test_Class_type @dataclass class TestFoo: - x : float - y : int + x: float + y: int + def inf(self): return self.x + def test_class_fn(x): foo = TestFoo(x, 1) return foo.inf() + # custom test function def test_custom(x, y, z): def g(x1, y1): def h(x2): return x2 + y1 + z + return h(x1) + return g(x, y) + def test_simple_closure(a, b): """Test some trivial closures.""" z = 1 + def f(): - return a + z + return a + z def g(): return b + 2.0 + return f() * g() + def test_assign_tuple(x, y): a = 1 b = 2 @@ -232,22 +271,28 @@ def 
test_assign_tuple(x, y): c, d = t return c + d + def test_unary(x, y): a = -x z = a + y return z + def f1(x, y): return x + y + def test_reslove_closure(x): z = x - def in_f2(x,y): - x = f1(x,y) + + def in_f2(x, y): + x = f1(x, y) return x + y + z + return in_f2 -def test_augassign(x ,y): + +def test_augassign(x, y): x += x y -= x return y @@ -263,10 +308,12 @@ def test_sys_call(x, y): a = len(x) + len(y) return a + def test_bool_not(x, y): z = x and y return not z + def test_call_fn_use_tuple(y): log.info("the y is :%r", y) log.info("y type is :%r", type(y)) @@ -275,19 +322,23 @@ def test_call_fn_use_tuple(y): log.info("The parameter is: %r", i) return z + def test_subscript_setitem(): t = [1, 2, 5, 6] t[2] = t[2] + 7 t[3] = t[3] + 1 return t + def test_dict(): ret = {"a": 1, "b": 2} return ret + def func_call(x, y, *var, a=0, b=1, **kwargs): return x + y + var[0] + a + b + kwargs["z"] + def test_call_variable(): t = (1, 2, 3) d = {"z": 10, "e": 11} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py index afcc1fe292..68824a81d1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batch_norm_grad_split.py @@ -22,6 +22,7 @@ bn_training_reduce_grad = Primitive('BNTrainingReduceGrad') make_tuple = Primitive('make_tuple') tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} @@ -32,6 +33,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_batch_norm_grad_split(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py index bafe21ddde..6912ce4146 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnorm_to_bninfer.py @@ -21,6 +21,7 @@ bn_infer = Primitive('BNInfer') make_tuple = Primitive('make_tuple') tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} @@ -31,6 +32,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_batchnorm_to_bninfer(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py index 8f20da2ab0..c9852d91fa 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/batchnormgrad_to_bninfergrad.py @@ -22,6 +22,7 @@ bn_infer_grad = Primitive('BNInferGrad') make_tuple = Primitive('make_tuple') tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} @@ -32,6 +33,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_batchnormgrad_to_bninfergrad(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py index d8f7bcc996..fbe43522a8 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py @@ -22,6 +22,7 @@ make_tuple = Primitive('make_tuple') tuple_getitem 
= Primitive('tuple_getitem') axis = 2 + class FnDict: def __init__(self): self.fnDict = {} @@ -32,6 +33,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_confusion_mul_grad_fusion(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py index 767f85332f..9aae005e9d 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py @@ -22,6 +22,7 @@ relu_grad_v2 = Primitive('ReluGradV2') make_tuple = Primitive('make_tuple') tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} @@ -32,6 +33,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_derelu_fusion(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py index 961a3ae8a5..c40af821b9 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py @@ -24,6 +24,7 @@ transdata = Primitive("TransData") cast = Primitive('Cast') depend = Primitive('depend') + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py index 5d55e854f7..03d476b701 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py @@ -19,6 +19,7 @@ MatMul = P.MatMul() BiasAdd = P.BiasAdd() make_tuple = Primitive('make_tuple') + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py index c882029e90..b2464ecc46 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/momentum_lossscale_fusion_test.py @@ -24,6 +24,7 @@ tuple_getitem = Primitive('tuple_getitem') make_tuple = Primitive('make_tuple') constant = Tensor(1.0, mstype.float32) + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py index f3100b474a..ba7e8738b1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py @@ -21,6 +21,7 @@ fused_mul_add = Primitive('FusedMulAdd') make_tuple = Primitive('make_tuple') tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} @@ -31,6 +32,7 @@ class FnDict: def __getitem__(self, name): return self.fnDict[name] + def test_mul_add_fusion(tag): fns = FnDict() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/reshape_transpose_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/reshape_transpose_fusion_test.py index c440deffca..f5c7c5f3bb 100644 --- 
a/tests/ut/cpp/python_input/gtest_input/pre_activate/reshape_transpose_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/reshape_transpose_fusion_test.py @@ -20,6 +20,7 @@ Reshape = P.Reshape() ConfusionTransposeD = Primitive('ConfusionTransposeD') make_tuple = Primitive('make_tuple') + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py index b955c2bf11..2b3f9f79f8 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/topk_split_test.py @@ -19,6 +19,7 @@ from mindspore.ops import operations as P TopK = P.TopK() tuple_getitem = Primitive('tuple_getitem') + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_reshape_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_reshape_fusion_test.py index f1bc5f20fd..a266d335f1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_reshape_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_reshape_fusion_test.py @@ -20,6 +20,7 @@ Reshape = P.Reshape() ConfusionTransposeD = Primitive('ConfusionTransposeD') make_tuple = Primitive('make_tuple') + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py index 657e2938cb..48050ea03b 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py @@ -22,6 +22,7 @@ make_tuple = Primitive('make_tuple') transdata = Primitive("TransData") Transpose = P.Transpose() + class FnDict: def __init__(self): self.fnDict = {} diff --git a/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py b/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py index c7de09dbcb..05964bcfd4 100644 --- a/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py @@ -18,24 +18,26 @@ from mindspore.ops import operations as P from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters from mindspore.common.tensor import Tensor + def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1): """Rearranges an image to row vector""" batch_num, channel, height, width = img.shape - out_h = (height + 2*pad - filter_h - (filter_h - 1) * (dilation[2] - 1))//stride[2] + 1 - out_w = (width + 2*pad - filter_w - (filter_w - 1) * (dilation[3] - 1))//stride[3] + 1 + out_h = (height + 2 * pad - filter_h - (filter_h - 1) * (dilation[2] - 1)) // stride[2] + 1 + out_w = (width + 2 * pad - filter_w - (filter_w - 1) * (dilation[3] - 1)) // stride[3] + 1 img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant') col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype) for y in range(filter_h): - y_max = y + stride[2]*out_h + y_max = y + stride[2] * out_h for x in range(filter_w): - x_max = x + stride[2]*out_w + x_max = x + stride[2] * out_w col[:, :, y, x, :, :] = img[:, :, y:y_max:stride[2], x:x_max:stride[2]] - col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1) + col = col.transpose(0, 4, 5, 
1, 2, 3).reshape(batch_num * out_h * out_w, -1) return col + # pylint: disable=unused-argument def conv2d(x, weight, bias=None, stride=1, pad=0, dilation=1, groups=1, padding_mode='zeros'): @@ -56,12 +58,14 @@ def conv2d(x, weight, bias=None, stride=1, pad=0, @vm_impl_getters.register(P.Conv2D) def vm_impl_conv2d(self): """Generate vm_impl function for Conv2D""" + def vm_impl(x, w): x = x.asnumpy() weight = w.asnumpy() bias = None out = conv2d(x, weight, bias, self.stride, self.pad, self.dilation) return Tensor(out) + return vm_impl diff --git a/tests/ut/cpp/python_input/gtest_input/session/session_test.py b/tests/ut/cpp/python_input/gtest_input/session/session_test.py index 3ee0415fc5..edd3dc6e9b 100644 --- a/tests/ut/cpp/python_input/gtest_input/session/session_test.py +++ b/tests/ut/cpp/python_input/gtest_input/session/session_test.py @@ -16,7 +16,6 @@ from mindspore.ops import operations as P from mindspore.ops import Primitive import mindspore as ms - addn = P.AddN() add = P.TensorAdd() reshape = P.Reshape() @@ -24,11 +23,13 @@ cast = P.Cast() tuple_getitem = Primitive('tuple_getitem') max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) + def test_addn_cast(x, y, z): mysum = addn((x, y)) res = cast(mysum, ms.float16) return res + def test_addn_with_max_pool(x, y): mysum = addn((x, y)) output = max_pool(mysum) diff --git a/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py b/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py index d6c6ad4ab9..2b95acb025 100644 --- a/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py +++ b/tests/ut/cpp/python_input/gtest_input/transform/multi_relu_case.py @@ -14,11 +14,16 @@ # ============================================================================ """ multi_relu_case """ from mindspore.ops import Primitive + + # Test user define ops def get_test_ops_fn(): return test_ops_f + scalar_mul = Primitive('scalar_mul') + + def test_ops_f(x, y): z = scalar_mul(x, y) return z diff --git a/tests/ut/cpp/python_input/gtest_input/utils/graph_utils_test.py b/tests/ut/cpp/python_input/gtest_input/utils/graph_utils_test.py index e4249999eb..a5351c6677 100644 --- a/tests/ut/cpp/python_input/gtest_input/utils/graph_utils_test.py +++ b/tests/ut/cpp/python_input/gtest_input/utils/graph_utils_test.py @@ -13,11 +13,15 @@ # limitations under the License. 
# ============================================================================ """ graph_utils_test """ + + def test_graph_utils_isomorphic_1(a, b): return a + b + def test_graph_utils_isomorphic_2(x, y): return x + y + def test_graph_utils_isomorphic_3(x, y): return x * y diff --git a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py index 947e9fa2c3..f9ed0ca900 100644 --- a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py +++ b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py @@ -18,14 +18,18 @@ from mindspore.ops import Primitive scala_add = Primitive('scalar_add') scala_mul = Primitive('scalar_mul') scalar_gt = Primitive('scalar_gt') + + def scalar_add(x, y): """Implement `scalar_add`.""" return scala_add(x, y) + def scalar_mul(x, y): """Implement `scalar_mul`.""" return scala_mul(x, y) + def test_if(x, y): if scalar_gt(x, y): return x diff --git a/tests/ut/data/dataset/testPyfuncMap/pyfuncmap.py b/tests/ut/data/dataset/testPyfuncMap/pyfuncmap.py index 2d20994758..9e1e7af3c9 100644 --- a/tests/ut/data/dataset/testPyfuncMap/pyfuncmap.py +++ b/tests/ut/data/dataset/testPyfuncMap/pyfuncmap.py @@ -14,10 +14,10 @@ # ============================================================================== import mindspore.dataset as ds - DATA_DIR = ["./data.data"] SCHEMA_DIR = "./schema.json" + def test_case_0(): """ Test PyFunc @@ -44,7 +44,6 @@ def test_case_1(): """ print("Test 1-n PyFunc : (lambda x : (x , x + x)) ") - col = "col0" # apply dataset operations @@ -68,7 +67,6 @@ def test_case_2(): """ print("Test n-1 PyFunc : (lambda x, y : x + y) ") - col = ["col0", "col1"] # apply dataset operations @@ -83,19 +81,20 @@ def test_case_2(): print("************** Output Tensor *****************") + def test_case_3(): """ Test PyFunc """ print("Test n-m PyFunc : (lambda x, y : (x , x + 1, x + y)") - col = ["col0", "col1"] # apply dataset operations ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) - ds1 = ds1.map(input_column_names=col, output_column_names=["out0", "out1", "out2"], operation=(lambda x, y: (x, x + y, x + x + y))) + ds1 = ds1.map(input_column_names=col, output_column_names=["out0", "out1", "out2"], + operation=(lambda x, y: (x, x + y, x + x + y))) print("************** Output Tensor *****************") for data in ds1.create_dict_iterator(): # each data is a dictionary @@ -108,19 +107,20 @@ def test_case_3(): print(data["out2"]) print("************** Output Tensor *****************") + def test_case_4(): """ Test PyFunc """ print("Test Parallel n-m PyFunc : (lambda x, y : (x , x + 1, x + y)") - col = ["col0", "col1"] # apply dataset operations ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) - ds1 = ds1.map(input_column_names=col, output_column_names=["out0", "out1", "out2"], num_parallel_workers = 4, operation=(lambda x, y: (x, x + y, x + x + y))) + ds1 = ds1.map(input_column_names=col, output_column_names=["out0", "out1", "out2"], num_parallel_workers=4, + operation=(lambda x, y: (x, x + y, x + x + y))) print("************** Output Tensor *****************") for data in ds1.create_dict_iterator(): # each data is a dictionary @@ -134,10 +134,9 @@ def test_case_4(): print("************** Output Tensor *****************") - if __name__ == "__main__": test_case_0() - #test_case_1() - #test_case_2() - #test_case_3() - #test_case_4() + # test_case_1() + # test_case_2() + # test_case_3() + # test_case_4() diff --git a/tests/ut/python/communication/__init__.py b/tests/ut/python/communication/__init__.py index 
393930c4eb..6cb7088820 100644 --- a/tests/ut/python/communication/__init__.py +++ b/tests/ut/python/communication/__init__.py @@ -13,4 +13,5 @@ # limitations under the License. import sys -sys.path.append("../../..") \ No newline at end of file + +sys.path.append("../../..") diff --git a/tests/ut/python/communication/test_comm.py b/tests/ut/python/communication/test_comm.py index 31beb1fe5a..291998a30c 100644 --- a/tests/ut/python/communication/test_comm.py +++ b/tests/ut/python/communication/test_comm.py @@ -27,6 +27,7 @@ from mindspore.nn import ReLU from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn import Momentum import mindspore.context as context + # pylint: disable=W0212 # W0212: protected-access @@ -34,8 +35,10 @@ tag = 0 init("hccl") + class AllReduceNet(nn.Cell): """AllReduceNet definition""" + def __init__(self, input_channel, out_channel, op): super(AllReduceNet, self).__init__() self.dense = Dense(input_channel, out_channel) @@ -47,8 +50,10 @@ class AllReduceNet(nn.Cell): x = self.reduce(x) return self.relu(x) + class BroadCastNet(nn.Cell): """BroadCastNet definition""" + def __init__(self, input_channel, out_channel): super(BroadCastNet, self).__init__() self.dense = Dense(input_channel, out_channel) @@ -59,8 +64,10 @@ class BroadCastNet(nn.Cell): x = self.dense(x) return x + class AllGatherNet(nn.Cell): """AllGatherNet definition""" + def __init__(self, input_channel, out_channel): super(AllGatherNet, self).__init__() self.dense = Dense(input_channel, out_channel) @@ -78,8 +85,10 @@ class AllGatherNet(nn.Cell): x = self.allgather(x) return self.relu(x) + class ReduceScatterNet(nn.Cell): """ReduceScatterNet definition""" + def __init__(self, input_channel, out_channel, op): super(ReduceScatterNet, self).__init__() self.dense = Dense(input_channel, out_channel) @@ -91,8 +100,10 @@ class ReduceScatterNet(nn.Cell): x = self.reducescatter(x) return self.relu(x) + class AlltoAllNet(nn.Cell): """AlltoAllNet definition""" + def __init__(self, input_channel, out_channel): super(AlltoAllNet, self).__init__() self.dense = Dense(input_channel, out_channel) @@ -104,6 +115,7 @@ class AlltoAllNet(nn.Cell): x = self.alltoall(x) return self.relu(x) + def run_allreduce(op): """run_allreduce""" context.set_context(mode=context.GRAPH_MODE) @@ -118,6 +130,7 @@ def run_allreduce(op): network = TrainOneStepCell(network, optimizer) _executor.compile(network, input_tensor, label_tensor) + def test_allreduce(): """test_allreduce""" context.set_context(mode=context.GRAPH_MODE) @@ -125,6 +138,7 @@ def test_allreduce(): run_allreduce(ReduceOp.MAX) run_allreduce(ReduceOp.MIN) + def test_allgather(): """test_allgather""" context.set_context(mode=context.GRAPH_MODE) @@ -139,6 +153,7 @@ def test_allgather(): network = TrainOneStepCell(network, optimizer) _executor.compile(network, input_tensor, label_tensor) + def run_reducescatter(op): """run_reducescatter""" context.set_context(mode=context.GRAPH_MODE) @@ -153,11 +168,13 @@ def run_reducescatter(op): network = TrainOneStepCell(network, optimizer) _executor.compile(network, input_tensor, label_tensor) + def test_reducescatter(): """test_reducescatter""" context.set_context(mode=context.GRAPH_MODE) run_reducescatter(ReduceOp.SUM) + def test_broadcast(): """test_broadcast""" context.set_context(mode=context.GRAPH_MODE) @@ -172,6 +189,7 @@ def test_broadcast(): network = TrainOneStepCell(network, optimizer) _executor.compile(network, input_tensor_1, label_tensor) + def test_alltoall(): """test_alltoall""" 
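    # Compile-only check, like the collectives above. Roughly speaking, AlltoAll
    # scatters a slice of the input along split_dim to every rank and gathers
    # the received slices along concat_dim, with split_count expected to match
    # the device count (see AlltoAllNet above for how the op is wired in).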
context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/communication/test_data_parallel_dense.py b/tests/ut/python/communication/test_data_parallel_dense.py index 2e9553ee46..0fc46e0801 100644 --- a/tests/ut/python/communication/test_data_parallel_dense.py +++ b/tests/ut/python/communication/test_data_parallel_dense.py @@ -29,6 +29,7 @@ import mindspore.context as context class DenseMMNet(nn.Cell): """DenseMMNet definition""" + def __init__(self): super(DenseMMNet, self).__init__() self.fc1 = nn.Dense(128, 768, activation='relu') diff --git a/tests/ut/python/communication/test_data_parallel_lenet.py b/tests/ut/python/communication/test_data_parallel_lenet.py index 658995ae3e..b3043aa25f 100755 --- a/tests/ut/python/communication/test_data_parallel_lenet.py +++ b/tests/ut/python/communication/test_data_parallel_lenet.py @@ -27,8 +27,10 @@ from mindspore.nn.optim import Momentum _current_dir = os.path.dirname(os.path.realpath(__file__)) + "/../test_data" + class LeNet5(nn.Cell): """LeNet5 definition""" + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5) @@ -52,6 +54,7 @@ class LeNet5(nn.Cell): class DatasetLenet(): """DatasetLenet definition""" + def __init__(self, predict, label, length=3): self.predict = predict self.label = label diff --git a/tests/ut/python/communication/test_management_api.py b/tests/ut/python/communication/test_management_api.py index d624c5ab59..550fc44d3c 100644 --- a/tests/ut/python/communication/test_management_api.py +++ b/tests/ut/python/communication/test_management_api.py @@ -17,6 +17,7 @@ management api """ import mindspore.communication.management as D + def has_raise_error(func, x): try: # pylint:disable=eval-used @@ -30,79 +31,96 @@ def has_raise_error(func, x): else: return False + def create_backend(name): D.Backend(name) + def get_group_size_int(group): D.get_group_size(group) + def create_group0(x): D.GlobalComm.BACKEND = D.Backend.HCCL D.create_group('0-1', x) + def create_group1(x): D.GlobalComm.BACKEND = D.Backend.HCCL D.create_group('0-1', x) + def create_group2(x): D.GlobalComm.BACKEND = D.Backend.HCCL D.create_group('0-1', x) + def create_group3(x): D.GlobalComm.BACKEND = D.Backend.UNDEFINED D.create_group('0-1', x) + def create_group4(x): D.GlobalComm.BACKEND = D.Backend.HCCL D.create_group('0-1', x) + def get_world_rank_from_group_rank0(): D.GlobalComm.BACKEND = D.Backend.HCCL D.get_world_rank_from_group_rank(D.HCCL_WORLD_COMM_GROUP, 0) + def get_world_rank_from_group_rank1(): D.GlobalComm.BACKEND = D.Backend.HCCL D.get_world_rank_from_group_rank('0-1', '0') + def get_world_rank_from_group_rank2(): D.GlobalComm.BACKEND = D.Backend.UNDEFINED D.get_world_rank_from_group_rank('0-1', 0) + def get_group_rank_from_world_rank0(): D.GlobalComm.BACKEND = D.Backend.HCCL D.get_group_rank_from_world_rank(0, D.HCCL_WORLD_COMM_GROUP) + def get_group_rank_from_world_rank1(): D.GlobalComm.BACKEND = D.Backend.HCCL D.get_group_rank_from_world_rank('0', '0-1') + def get_group_rank_from_world_rank2(): D.GlobalComm.BACKEND = D.Backend.UNDEFINED D.get_group_rank_from_world_rank(0, '0-1') + def destroy_group0(x): D.GlobalComm.BACKEND = D.Backend.UNDEFINED D.destroy_group(x) + def destroy_group1(): D.GlobalComm.BACKEND = D.Backend.HCCL D.destroy_group(D.HCCL_WORLD_COMM_GROUP) + def destroy_group2(x): D.GlobalComm.BACKEND = D.Backend.HCCL D.destroy_group(x) + def test_raise_error_funcs(): """test raise error funcs""" assert has_raise_error(create_backend, 123) is True assert has_raise_error(create_backend, 'hccl') is 
False assert has_raise_error(create_backend, 'nccl') is False assert has_raise_error(get_group_size_int, 123) is True - assert has_raise_error(create_group0, (0,1)) is True + assert has_raise_error(create_group0, (0, 1)) is True assert has_raise_error(create_group1, [0]) is False - assert has_raise_error(create_group2, [0,0,1]) is True - assert has_raise_error(create_group3, [0,1]) is True - assert has_raise_error(create_group4, [0,1]) is False + assert has_raise_error(create_group2, [0, 0, 1]) is True + assert has_raise_error(create_group3, [0, 1]) is True + assert has_raise_error(create_group4, [0, 1]) is False assert has_raise_error(get_world_rank_from_group_rank0, None) is True assert has_raise_error(get_world_rank_from_group_rank1, None) is True assert has_raise_error(get_world_rank_from_group_rank2, None) is True @@ -113,13 +131,14 @@ def test_raise_error_funcs(): assert has_raise_error(destroy_group1, None) is True assert has_raise_error(destroy_group2, '0-1') is False + def test_get_rank_none(): assert D.get_rank(group=None) == 0 + def test_group_funs(): D.GlobalComm.BACKEND = D.Backend.HCCL assert D.get_group_size(group=None) == 1 assert D.get_group_size('2-abcd') == 2 assert D.get_world_rank_from_group_rank('0-1', 0) == 0 assert D.get_group_rank_from_world_rank(0, '0-1') == 0 - diff --git a/tests/ut/python/conftest.py b/tests/ut/python/conftest.py index ff49460f9f..6850f956a5 100644 --- a/tests/ut/python/conftest.py +++ b/tests/ut/python/conftest.py @@ -20,6 +20,7 @@ import pytest from _pytest.runner import runtestprotocol + def pytest_addoption(parser): """ add runmode option to control running testcase @@ -37,6 +38,7 @@ def test_with_simu(request): """ return request.config.getoption("--runmode") == "simu" + # https://stackoverflow.com/questions/14121657/how-to-get-test-name-and-test-result-during-run-time-in-pytest def pytest_runtest_protocol(item, nextitem): reports = runtestprotocol(item, nextitem=nextitem) diff --git a/tests/ut/python/dataset/test_2ops.py b/tests/ut/python/dataset/test_2ops.py index 65614c3274..627f365715 100644 --- a/tests/ut/python/dataset/test_2ops.py +++ b/tests/ut/python/dataset/test_2ops.py @@ -16,7 +16,6 @@ import mindspore.dataset as ds from mindspore import log as logger from util import save_and_check - DATA_DIR = ["../data/dataset/testTFTestAllTypes/test.data"] SCHEMA_DIR = "../data/dataset/testTFTestAllTypes/datasetSchema.json" COLUMNS = ["col_1d", "col_2d", "col_3d", "col_binary", "col_float", @@ -159,7 +158,7 @@ def test_2ops_shuffle_batch(): if __name__ == '__main__': test_2ops_repeat_shuffle() - #test_2ops_shuffle_repeat() + # test_2ops_shuffle_repeat() test_2ops_repeat_batch() test_2ops_batch_repeat() test_2ops_batch_shuffle() diff --git a/tests/ut/python/dataset/test_Tensor.py b/tests/ut/python/dataset/test_Tensor.py index 462020c90f..d9ee8754b4 100644 --- a/tests/ut/python/dataset/test_Tensor.py +++ b/tests/ut/python/dataset/test_Tensor.py @@ -41,6 +41,7 @@ def test_basic(): assert n.type() == cde.DataType("int64") assert arr.__array_interface__['data'] == arr2.__array_interface__['data'] + def test_strides(): x = np.array([[1, 2, 3], [4, 5, 6]]) n1 = cde.Tensor(x[:, 1]) @@ -53,6 +54,7 @@ def test_strides(): assert np.array_equal(x.transpose(), arr) + if __name__ == '__main__': test_shape() test_strides() diff --git a/tests/ut/python/dataset/test_apply.py b/tests/ut/python/dataset/test_apply.py index f2e7a79011..5993e9a58b 100644 --- a/tests/ut/python/dataset/test_apply.py +++ b/tests/ut/python/dataset/test_apply.py @@ -19,11 +19,13 @@ 
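# dataset.apply(fn) threads the whole dataset through a user-supplied function,
# letting a pipeline be packaged and reused. A minimal sketch of the pattern
# the cases below rely on (names and buffer sizes hypothetical):
#
#   def dataset_fn(data):
#       data = data.shuffle(4)
#       data = data.repeat(2)
#       return data
#
#   data1 = data1.apply(dataset_fn)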
import numpy as np DATA_DIR = "../data/dataset/testPK/data" + # Generate 1d int numpy array from 0 - 64 def generator_1d(): for i in range(64): yield (np.array([i]),) + def test_apply_generator_case(): # apply dataset operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -40,6 +42,7 @@ def test_apply_generator_case(): for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): assert np.array_equal(item1["data"], item2["data"]) + def test_apply_imagefolder_case(): # apply dataset map operations data1 = ds.ImageFolderDatasetV2(DATA_DIR, num_shards=4, shard_id=3) @@ -49,19 +52,20 @@ def test_apply_imagefolder_case(): normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0]) def dataset_fn(ds): - ds = ds.map(operations = decode_op) - ds = ds.map(operations = normalize_op) + ds = ds.map(operations=decode_op) + ds = ds.map(operations=normalize_op) ds = ds.repeat(2) return ds - + data1 = data1.apply(dataset_fn) - data2 = data2.map(operations = decode_op) - data2 = data2.map(operations = normalize_op) + data2 = data2.map(operations=decode_op) + data2 = data2.map(operations=normalize_op) data2 = data2.repeat(2) - + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): assert np.array_equal(item1["image"], item2["image"]) + def test_apply_flow_case_0(id=0): # apply control flow operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -92,6 +96,7 @@ def test_apply_flow_case_0(id=0): else: assert num_iter == 64 + def test_apply_flow_case_1(id=1): # apply control flow operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -122,6 +127,7 @@ def test_apply_flow_case_1(id=1): else: assert num_iter == 64 + def test_apply_flow_case_2(id=2): # apply control flow operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -152,6 +158,7 @@ def test_apply_flow_case_2(id=2): else: assert num_iter == 64 + def test_apply_flow_case_3(id=3): # apply control flow operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -182,6 +189,7 @@ def test_apply_flow_case_3(id=3): else: assert num_iter == 64 + def test_apply_exception_case(): # apply exception operations data1 = ds.GeneratorDataset(generator_1d, ["data"]) @@ -217,7 +225,8 @@ def test_apply_exception_case(): assert False except ValueError: pass - + + if __name__ == '__main__': logger.info("Running test_apply.py test_apply_generator_case() function") test_apply_generator_case() @@ -233,4 +242,3 @@ if __name__ == '__main__': logger.info("Running test_apply.py test_apply_exception_case() function") test_apply_exception_case() - diff --git a/tests/ut/python/dataset/test_autocontrast.py b/tests/ut/python/dataset/test_autocontrast.py index 7dba2f21f6..c60f7ff3fa 100644 --- a/tests/ut/python/dataset/test_autocontrast.py +++ b/tests/ut/python/dataset/test_autocontrast.py @@ -37,65 +37,64 @@ def visualize(image_original, image_auto_contrast): plt.title("DE AutoContrast image") plt.show() - + def test_auto_contrast(plot=False): """ Test AutoContrast """ logger.info("Test AutoContrast") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for 
idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # AutoContrast Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # AutoContrast Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transforms_auto_contrast = F.ComposeOp([F.Decode(), - F.Resize((224,224)), + F.Resize((224, 224)), F.AutoContrast(), - F.ToTensor()]) - + F.ToTensor()]) + ds_auto_contrast = ds.map(input_columns="image", - operations=transforms_auto_contrast()) - - ds_auto_contrast = ds_auto_contrast.batch(512) - - for idx, (image,label) in enumerate(ds_auto_contrast): + operations=transforms_auto_contrast()) + + ds_auto_contrast = ds_auto_contrast.batch(512) + + for idx, (image, label) in enumerate(ds_auto_contrast): if idx == 0: - images_auto_contrast = np.transpose(image, (0, 2,3,1)) + images_auto_contrast = np.transpose(image, (0, 2, 3, 1)) else: images_auto_contrast = np.append(images_auto_contrast, - np.transpose(image, (0, 2,3,1)), - axis=0) - + np.transpose(image, (0, 2, 3, 1)), + axis=0) + num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_auto_contrast[i]-images_original[i])**2) + mse[i] = np.mean((images_auto_contrast[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) - + if plot: visualize(images_original, images_auto_contrast) - + if __name__ == "__main__": test_auto_contrast(plot=True) - diff --git a/tests/ut/python/dataset/test_batch.py b/tests/ut/python/dataset/test_batch.py index 891e3ed045..b2d1919a62 100644 --- a/tests/ut/python/dataset/test_batch.py +++ b/tests/ut/python/dataset/test_batch.py @@ -156,7 +156,7 @@ def test_batch_07(): # apply dataset operations data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES) data1 = data1.batch(num_parallel_workers=num_parallel_workers, drop_remainder=drop_remainder, - batch_size=batch_size) + batch_size=batch_size) assert sum([1 for _ in data1]) == 3 filename = "batch_07_result.npz" @@ -261,7 +261,6 @@ def test_batch_12(): save_and_check(data1, parameters, filename, generate_golden=GENERATE_GOLDEN) - def test_batch_exception_01(): """ Test batch exception: num_parallel_workers=0 diff --git a/tests/ut/python/dataset/test_concat.py b/tests/ut/python/dataset/test_concat.py index fad1288a04..db6ffebb59 100644 --- a/tests/ut/python/dataset/test_concat.py +++ b/tests/ut/python/dataset/test_concat.py @@ -31,6 +31,7 @@ def generator_10(): for i in range(3, 10): yield np.array([i]), + # In generator_20 dataset: Number of rows is 10, its value is 10, 11, 12 ... 
20
 def generator_20():
     for i in range(10, 20):
@@ -320,7 +321,7 @@ def test_concat_14():
     data2 = ds.ImageFolderDatasetV2(DATA_DIR2, num_samples=2)
 
     transforms1 = F.ComposeOp([F.Decode(),
-                               F.Resize((224,224)),
+                               F.Resize((224, 224)),
                                F.ToTensor()])
 
     data1 = data1.map(input_columns=["image"], operations=transforms1())
@@ -374,4 +375,4 @@ if __name__ == "__main__":
     test_concat_12()
     test_concat_13()
     test_concat_14()
-    test_concat_15()
\ No newline at end of file
+    test_concat_15()
diff --git a/tests/ut/python/dataset/test_config.py b/tests/ut/python/dataset/test_config.py
index 8a7c0f2911..10f00b3606 100644
--- a/tests/ut/python/dataset/test_config.py
+++ b/tests/ut/python/dataset/test_config.py
@@ -26,10 +26,10 @@
 import mindspore.dataset as ds
 import mindspore.dataset.transforms.vision.c_transforms as vision
 import mindspore.dataset.transforms.vision.py_transforms as py_vision
-
 DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
 SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
 
+
 def test_basic():
     ds.config.load('../data/dataset/declient.cfg')
 
@@ -114,7 +114,7 @@ def test_deterministic_run_fail():
     try:
         for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
-            np.testing.assert_equal (item1["image"], item2["image"])
+            np.testing.assert_equal(item1["image"], item2["image"])
 
     except BaseException as e:
         # two datasets split the number out of the sequence a
@@ -147,7 +147,7 @@ def test_deterministic_run_pass():
     data2 = data2.map(input_columns=["image"], operations=random_crop_op2)
     try:
         for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
-            np.testing.assert_equal (item1["image"], item2["image"])
+            np.testing.assert_equal(item1["image"], item2["image"])
     except BaseException as e:
         # two datasets both use numbers from the generated sequence "a"
         logger.info("Got an exception in DE: {}".format(str(e)))
@@ -177,7 +177,7 @@ def test_seed_undeterministic():
     data2 = data2.map(input_columns=["image"], operations=random_crop_op2)
 
     for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
-        np.testing.assert_equal (item1["image"], item2["image"])
+        np.testing.assert_equal(item1["image"], item2["image"])
 
 
 def test_deterministic_run_distribution():
@@ -205,7 +205,7 @@ def test_deterministic_run_distribution():
     data2 = data2.map(input_columns=["image"], operations=random_crop_op2)
 
     for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
-        np.testing.assert_equal (item1["image"], item2["image"])
+        np.testing.assert_equal(item1["image"], item2["image"])
 
 
 def test_deterministic_python_seed():
@@ -241,7 +241,7 @@ def test_deterministic_python_seed():
     for data_two in data2.create_dict_iterator():
         data2_output.append(data_two["image"])
 
-    np.testing.assert_equal (data1_output, data2_output)
+    np.testing.assert_equal(data1_output, data2_output)
 
 
 def test_deterministic_python_seed_multi_thread():
@@ -277,7 +277,7 @@ def test_deterministic_python_seed_multi_thread():
         data2_output.append(data_two["image"])
 
     try:
-        np.testing.assert_equal (data1_output, data2_output)
+        np.testing.assert_equal(data1_output, data2_output)
     except BaseException as e:
         # expect output to not match during multi-threaded execution
         logger.info("Got an exception in DE: {}".format(str(e)))
diff --git a/tests/ut/python/dataset/test_cut_out.py b/tests/ut/python/dataset/test_cut_out.py
index 3b3bf12190..badb2de435 100644
--- a/tests/ut/python/dataset/test_cut_out.py
+++
b/tests/ut/python/dataset/test_cut_out.py @@ -121,7 +121,6 @@ def test_cut_out_op_multicut(): num_iter = 0 for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): - num_iter += 1 image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) # C image doesn't require transpose diff --git a/tests/ut/python/dataset/test_datasets_imagenet.py b/tests/ut/python/dataset/test_datasets_imagenet.py index 27a67c5880..80a0705fd3 100644 --- a/tests/ut/python/dataset/test_datasets_imagenet.py +++ b/tests/ut/python/dataset/test_datasets_imagenet.py @@ -204,5 +204,3 @@ if __name__ == '__main__': logger.info('Map then Shuffle') test_case_3() logger.info('\n') - - diff --git a/tests/ut/python/dataset/test_datasets_imagenet_distribution.py b/tests/ut/python/dataset/test_datasets_imagenet_distribution.py index 8afe624fbc..3487562ea0 100644 --- a/tests/ut/python/dataset/test_datasets_imagenet_distribution.py +++ b/tests/ut/python/dataset/test_datasets_imagenet_distribution.py @@ -63,6 +63,7 @@ def test_tf_file_distribution_unique(): logger.info("Number of data in data1: {}".format(num_iter)) assert num_iter == 4 + def test_tf_file_distribution_random(): data1 = ds.StorageDataset(DATA_DIR, SCHEMA_DIR, DISTRIBUTION_RANDOM_DIR) data1 = data1.repeat(1) @@ -73,6 +74,7 @@ def test_tf_file_distribution_random(): logger.info("Number of data in data1: {}".format(num_iter)) assert num_iter == 4 + def test_tf_file_distribution_equal_rows(): data1 = ds.StorageDataset(DATA_DIR, SCHEMA_DIR, DISTRIBUTION_EQUAL_DIR) data1 = data1.repeat(2) @@ -82,6 +84,7 @@ def test_tf_file_distribution_equal_rows(): assert num_iter == 4 + if __name__ == '__main__': logger.info('=======test normal=======') test_tf_file_normal() diff --git a/tests/ut/python/dataset/test_datasets_sharding.py b/tests/ut/python/dataset/test_datasets_sharding.py index b398391fb7..825ceb661a 100644 --- a/tests/ut/python/dataset/test_datasets_sharding.py +++ b/tests/ut/python/dataset/test_datasets_sharding.py @@ -15,13 +15,14 @@ import mindspore.dataset as ds from mindspore import log as logger + def test_imagefolder_shardings(print_res=False): image_folder_dir = "../data/dataset/testPK/data" def sharding_config(num_shards, shard_id, num_samples, shuffle, class_index, repeat_cnt=1): data1 = ds.ImageFolderDatasetV2(image_folder_dir, num_samples=num_samples, num_shards=num_shards, - shard_id=shard_id, - shuffle=shuffle, class_indexing=class_index, decode=True) + shard_id=shard_id, + shuffle=shuffle, class_indexing=class_index, decode=True) data1 = data1.repeat(repeat_cnt) res = [] for item in data1.create_dict_iterator(): # each data is a dictionary @@ -48,7 +49,7 @@ def test_manifest_shardings(print_res=False): def sharding_config(num_shards, shard_id, num_samples, shuffle, repeat_cnt=1): data1 = ds.ManifestDataset(manifest_file, num_samples=num_samples, num_shards=num_shards, shard_id=shard_id, - shuffle=shuffle, decode=True) + shuffle=shuffle, decode=True) data1 = data1.repeat(repeat_cnt) res = [] for item in data1.create_dict_iterator(): # each data is a dictionary @@ -101,7 +102,7 @@ def test_cifar10_shardings(print_res=False): def sharding_config(num_shards, shard_id, num_samples, shuffle, repeat_cnt=1): data1 = ds.Cifar10Dataset(cifar10_dir, num_shards=num_shards, shard_id=shard_id, num_samples=num_samples, - shuffle=shuffle) + shuffle=shuffle) data1 = data1.repeat(repeat_cnt) res = [] for item in data1.create_dict_iterator(): # each data is a dictionary @@ -121,7 +122,7 @@ def test_cifar100_shardings(print_res=False): def 
sharding_config(num_shards, shard_id, num_samples, shuffle, repeat_cnt=1): data1 = ds.Cifar100Dataset(cifar100_dir, num_shards=num_shards, shard_id=shard_id, num_samples=num_samples, - shuffle=shuffle) + shuffle=shuffle) data1 = data1.repeat(repeat_cnt) res = [] for item in data1.create_dict_iterator(): # each data is a dictionary @@ -140,7 +141,7 @@ def test_mnist_shardings(print_res=False): def sharding_config(num_shards, shard_id, num_samples, shuffle, repeat_cnt=1): data1 = ds.MnistDataset(mnist_dir, num_shards=num_shards, shard_id=shard_id, num_samples=num_samples, - shuffle=shuffle) + shuffle=shuffle) data1 = data1.repeat(repeat_cnt) res = [] for item in data1.create_dict_iterator(): # each data is a dictionary diff --git a/tests/ut/python/dataset/test_datasets_textfileop.py b/tests/ut/python/dataset/test_datasets_textfileop.py index b13a86d80b..b255180278 100644 --- a/tests/ut/python/dataset/test_datasets_textfileop.py +++ b/tests/ut/python/dataset/test_datasets_textfileop.py @@ -19,13 +19,15 @@ import mindspore.dataset.transforms.nlp.utils as nlp DATA_FILE = "../data/dataset/testTextFileDataset/1.txt" DATA_ALL_FILE = "../data/dataset/testTextFileDataset/*" + def test_textline_dataset_one_file(): data = ds.TextFileDataset(DATA_FILE) count = 0 for i in data.create_dict_iterator(): logger.info("{}".format(i["text"])) count += 1 - assert(count == 3) + assert (count == 3) + def test_textline_dataset_all_file(): data = ds.TextFileDataset(DATA_ALL_FILE) @@ -33,7 +35,8 @@ def test_textline_dataset_all_file(): for i in data.create_dict_iterator(): logger.info("{}".format(i["text"])) count += 1 - assert(count == 5) + assert (count == 5) + def test_textline_dataset_totext(): ds.config.set_num_parallel_workers(4) @@ -42,23 +45,26 @@ def test_textline_dataset_totext(): line = ["This is a text file.", "Another file.", "Be happy every day.", "End of file.", "Good luck to everyone."] for i in data.create_dict_iterator(): str = i["text"].item().decode("utf8") - assert(str == line[count]) + assert (str == line[count]) count += 1 - assert(count == 5) + assert (count == 5) + def test_textline_dataset_num_samples(): data = ds.TextFileDataset(DATA_FILE, num_samples=2) count = 0 for i in data.create_dict_iterator(): count += 1 - assert(count == 2) + assert (count == 2) + def test_textline_dataset_distribution(): data = ds.TextFileDataset(DATA_ALL_FILE, num_shards=2, shard_id=1) count = 0 for i in data.create_dict_iterator(): count += 1 - assert(count == 3) + assert (count == 3) + def test_textline_dataset_repeat(): data = ds.TextFileDataset(DATA_FILE, shuffle=False) @@ -69,14 +75,16 @@ def test_textline_dataset_repeat(): "This is a text file.", "Be happy every day.", "Good luck to everyone."] for i in data.create_dict_iterator(): str = i["text"].item().decode("utf8") - assert(str == line[count]) + assert (str == line[count]) count += 1 - assert(count == 9) + assert (count == 9) + def test_textline_dataset_get_datasetsize(): data = ds.TextFileDataset(DATA_FILE) size = data.get_dataset_size() - assert(size == 3) + assert (size == 3) + if __name__ == "__main__": test_textline_dataset_one_file() diff --git a/tests/ut/python/dataset/test_datasets_voc.py b/tests/ut/python/dataset/test_datasets_voc.py index ae3a2d1c0b..3dd71ec90e 100644 --- a/tests/ut/python/dataset/test_datasets_voc.py +++ b/tests/ut/python/dataset/test_datasets_voc.py @@ -20,6 +20,7 @@ DATA_DIR = "../data/dataset/testVOC2012" IMAGE_SHAPE = [2268, 2268, 2268, 2268, 642, 607, 561, 596, 612, 2268] TARGET_SHAPE = [680, 680, 680, 680, 642, 607, 
561, 596, 612, 680] + def test_voc_segmentation(): data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True, shuffle=False) num = 0 @@ -29,52 +30,56 @@ def test_voc_segmentation(): num += 1 assert (num == 10) + def test_voc_detection(): data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False) num = 0 - count = [ 0, 0, 0, 0, 0, 0 ] + count = [0, 0, 0, 0, 0, 0] for item in data1.create_dict_iterator(): assert (item["image"].shape[0] == IMAGE_SHAPE[num]) for bbox in item["annotation"]: count[bbox[0]] += 1 num += 1 assert (num == 9) - assert (count == [3,2,1,2,4,3]) + assert (count == [3, 2, 1, 2, 4, 3]) + def test_voc_class_index(): - class_index = { 'car': 0, 'cat': 1, 'train': 5 } + class_index = {'car': 0, 'cat': 1, 'train': 5} data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", class_indexing=class_index, decode=True) class_index1 = data1.get_class_indexing() - assert (class_index1 == { 'car': 0, 'cat': 1, 'train': 5 }) + assert (class_index1 == {'car': 0, 'cat': 1, 'train': 5}) data1 = data1.shuffle(4) class_index2 = data1.get_class_indexing() - assert (class_index2 == { 'car': 0, 'cat': 1, 'train': 5 }) + assert (class_index2 == {'car': 0, 'cat': 1, 'train': 5}) num = 0 - count = [0,0,0,0,0,0] + count = [0, 0, 0, 0, 0, 0] for item in data1.create_dict_iterator(): for bbox in item["annotation"]: assert (bbox[0] == 0 or bbox[0] == 1 or bbox[0] == 5) count[bbox[0]] += 1 num += 1 assert (num == 6) - assert (count == [3,2,0,0,0,3]) + assert (count == [3, 2, 0, 0, 0, 3]) + def test_voc_get_class_indexing(): data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True) class_index1 = data1.get_class_indexing() - assert (class_index1 == { 'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5 }) + assert (class_index1 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5}) data1 = data1.shuffle(4) class_index2 = data1.get_class_indexing() - assert (class_index2 == { 'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5 }) + assert (class_index2 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5}) num = 0 - count = [0,0,0,0,0,0] + count = [0, 0, 0, 0, 0, 0] for item in data1.create_dict_iterator(): for bbox in item["annotation"]: assert (bbox[0] == 0 or bbox[0] == 1 or bbox[0] == 2 or bbox[0] == 3 or bbox[0] == 4 or bbox[0] == 5) count[bbox[0]] += 1 num += 1 assert (num == 9) - assert (count == [3,2,1,2,4,3]) + assert (count == [3, 2, 1, 2, 4, 3]) + def test_case_0(): data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True) @@ -93,6 +98,7 @@ def test_case_0(): num += 1 assert (num == 20) + def test_case_1(): data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True) @@ -109,6 +115,7 @@ def test_case_1(): num += 1 assert (num == 18) + def test_voc_exception(): try: data1 = ds.VOCDataset(DATA_DIR, task="InvalidTask", mode="train", decode=True) @@ -119,7 +126,7 @@ def test_voc_exception(): pass try: - data2 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", class_indexing={ "cat":0 }, decode=True) + data2 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", class_indexing={"cat": 0}, decode=True) for _ in data2.create_dict_iterator(): pass assert False @@ -158,6 +165,7 @@ def test_voc_exception(): except RuntimeError: pass + if __name__ == '__main__': test_voc_segmentation() test_voc_detection() diff --git a/tests/ut/python/dataset/test_decode.py b/tests/ut/python/dataset/test_decode.py index 
47426812a7..5d9d1f39f5 100644 --- a/tests/ut/python/dataset/test_decode.py +++ b/tests/ut/python/dataset/test_decode.py @@ -22,7 +22,6 @@ import mindspore.dataset as ds from mindspore import log as logger from util import diff_mse - DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" diff --git a/tests/ut/python/dataset/test_deviceop_cpu.py b/tests/ut/python/dataset/test_deviceop_cpu.py index 0ee443ed9e..21262fe8ea 100644 --- a/tests/ut/python/dataset/test_deviceop_cpu.py +++ b/tests/ut/python/dataset/test_deviceop_cpu.py @@ -22,6 +22,7 @@ SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" TF_FILES = ["../data/dataset/testTFTestAllTypes/test.data"] TF_SCHEMA_FILE = "../data/dataset/testTFTestAllTypes/datasetSchema.json" + def test_case_0(): """ Test Repeat diff --git a/tests/ut/python/dataset/test_equalize.py b/tests/ut/python/dataset/test_equalize.py index 077c316d67..c98d2e67d3 100644 --- a/tests/ut/python/dataset/test_equalize.py +++ b/tests/ut/python/dataset/test_equalize.py @@ -37,65 +37,64 @@ def visualize(image_original, image_equalize): plt.title("DE Color Equalized image") plt.show() - + def test_equalize(plot=False): """ Test Equalize """ logger.info("Test Equalize") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # Color Equalized Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # Color Equalized Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transforms_equalize = F.ComposeOp([F.Decode(), - F.Resize((224,224)), + F.Resize((224, 224)), F.Equalize(), - F.ToTensor()]) - + F.ToTensor()]) + ds_equalize = ds.map(input_columns="image", - operations=transforms_equalize()) - - ds_equalize = ds_equalize.batch(512) - - for idx, (image,label) in enumerate(ds_equalize): + operations=transforms_equalize()) + + ds_equalize = ds_equalize.batch(512) + + for idx, (image, label) in enumerate(ds_equalize): if idx == 0: - images_equalize = np.transpose(image, (0, 2,3,1)) + images_equalize = np.transpose(image, (0, 2, 3, 1)) else: images_equalize = np.append(images_equalize, - np.transpose(image, (0, 2,3,1)), - axis=0) - + np.transpose(image, (0, 2, 3, 1)), + axis=0) + num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_equalize[i]-images_original[i])**2) + mse[i] = np.mean((images_equalize[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) - + if plot: visualize(images_original, images_equalize) - + if __name__ == "__main__": test_equalize(plot=True) - diff --git a/tests/ut/python/dataset/test_exceptions.py b/tests/ut/python/dataset/test_exceptions.py index 0cce60fa5c..1fb2aef998 100644 --- 
a/tests/ut/python/dataset/test_exceptions.py +++ b/tests/ut/python/dataset/test_exceptions.py @@ -17,7 +17,6 @@ import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore import log as logger - DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" @@ -33,6 +32,7 @@ def test_exception_01(): data = data.map(input_columns=["image"], operations=vision.Resize(100, 100)) assert "Invalid interpolation mode." in str(info.value) + def test_exception_02(): """ Test multiple exceptions with invalid input diff --git a/tests/ut/python/dataset/test_filterop.py b/tests/ut/python/dataset/test_filterop.py index 90f512caa4..2550970c8e 100644 --- a/tests/ut/python/dataset/test_filterop.py +++ b/tests/ut/python/dataset/test_filterop.py @@ -22,6 +22,8 @@ from mindspore import log as logger DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" + + # test for predicate def test_diff_predicate_func(): def test_filter(predicate_func): @@ -50,6 +52,7 @@ def test_diff_predicate_func(): test_filter(lambda image, label: label == np.array([3])) test_filter(lambda image, label: label == np.array(3)) + def filter_func_ge(data): if data > 10: return False @@ -60,6 +63,7 @@ def generator_1d(): for i in range(64): yield (np.array(i),) + # test with GeneratorDataset def test_filter_by_generator_with_no(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -70,6 +74,7 @@ def test_filter_by_generator_with_no(): assert item["data"] == expected_rs[num_iter] num_iter += 1 + # test with repeatOp before def test_filter_by_generator_with_repeat(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -84,8 +89,9 @@ def test_filter_by_generator_with_repeat(): assert num_iter == 44 for i in range(4): for ii in range(len(expected_rs)): - index = i * len(expected_rs) + ii - assert ret_data[index] == expected_rs[ii] + index = i * len(expected_rs) + ii + assert ret_data[index] == expected_rs[ii] + # test with repeatOp after def test_filter_by_generator_with_repeat_after(): @@ -101,19 +107,22 @@ def test_filter_by_generator_with_repeat_after(): assert num_iter == 44 for i in range(4): for ii in range(len(expected_rs)): - index = i * len(expected_rs) + ii - assert ret_data[index] == expected_rs[ii] + index = i * len(expected_rs) + ii + assert ret_data[index] == expected_rs[ii] + def filter_func_batch(data): if data[0] > 8: return False return True + def filter_func_batch_after(data): if data > 20: return False return True + # test with batchOp before def test_filter_by_generator_with_batch(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -129,6 +138,7 @@ def test_filter_by_generator_with_batch(): assert ret_data[1][0] == 4 assert ret_data[2][0] == 8 + # test with batchOp after def test_filter_by_generator_with_batch_after(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -150,6 +160,7 @@ def filter_func_shuffle(data): return False return True + # test with batchOp before def test_filter_by_generator_with_shuffle(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -157,7 +168,7 @@ def test_filter_by_generator_with_shuffle(): dataset_f = dataset_s.filter(predicate=filter_func_shuffle, num_parallel_workers=4) num_iter = 0 for item in dataset_f.create_dict_iterator(): - num_iter += 1 + num_iter += 1 assert num_iter == 21 @@ -166,6 +177,7 @@ def 
filter_func_shuffle_after(data): return False return True + # test with batchOp after def test_filter_by_generator_with_shuffle_after(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -184,19 +196,21 @@ def generator_1d_zip1(): def generator_1d_zip2(): for i in range(64): - yield (np.array(i+100),) + yield (np.array(i + 100),) def filter_func_zip(data1, data2): - if data1 > 20: + if data1 > 20: return False return True + def filter_func_zip_after(data1): - if data1 > 20: + if data1 > 20: return False return True + # test with zipOp before def test_filter_by_generator_with_zip(): dataset1 = ds.GeneratorDataset(generator_1d_zip1, ["data1"]) @@ -207,12 +221,12 @@ def test_filter_by_generator_with_zip(): ret_data = [] for item in dataset_f.create_dict_iterator(): num_iter += 1 - ret_data.append({"data1": item["data1"], "data2":item["data2"]}) + ret_data.append({"data1": item["data1"], "data2": item["data2"]}) assert num_iter == 21 - assert ret_data[0]["data1"] == 0 - assert ret_data[0]["data2"] == 100 - assert ret_data[5]["data1"] == 5 - assert ret_data[5]["data2"] == 105 + assert ret_data[0]["data1"] == 0 + assert ret_data[0]["data2"] == 100 + assert ret_data[5]["data1"] == 5 + assert ret_data[5]["data2"] == 105 # test with zipOp after @@ -226,12 +240,12 @@ def test_filter_by_generator_with_zip_after(): ret_data = [] for item in dataz.create_dict_iterator(): num_iter += 1 - ret_data.append({"data1": item["data1"], "data2":item["data2"]}) + ret_data.append({"data1": item["data1"], "data2": item["data2"]}) assert num_iter == 21 - assert ret_data[0]["data1"] == 0 - assert ret_data[0]["data2"] == 0 - assert ret_data[5]["data1"] == 5 - assert ret_data[5]["data2"] == 5 + assert ret_data[0]["data1"] == 0 + assert ret_data[0]["data2"] == 0 + assert ret_data[5]["data1"] == 5 + assert ret_data[5]["data2"] == 5 def filter_func_map(col1, col2): @@ -242,7 +256,7 @@ def filter_func_map(col1, col2): def filter_func_map_part(col1): if col1 < 3: - return True + return True else: return False @@ -250,6 +264,7 @@ def filter_func_map_part(col1): def filter_func_map_all(col1, col2): return True + def generator_mc(maxid=20): for i in range(maxid): yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])) @@ -262,12 +277,13 @@ def func_map(data_col1, data_col2): def func_map_part(data_col1): return (data_col1) + # test with map def test_filter_by_generator_with_map_all_col(): dataset = ds.GeneratorDataset(generator_mc(12), ["col1", "col2"]) - dataset_map = dataset.map( input_columns=["col1"], output_columns=["col1"] , operations=func_map_part) + dataset_map = dataset.map(input_columns=["col1"], output_columns=["col1"], operations=func_map_part) # dataset_map = dataset.map( operations=func_map_part) - dataset_f = dataset_map.filter(input_columns=["col1"], predicate=filter_func_map_part, num_parallel_workers=1) + dataset_f = dataset_map.filter(input_columns=["col1"], predicate=filter_func_map_part, num_parallel_workers=1) num_iter = 0 ret_data = [] for item in dataset_f.create_dict_iterator(): @@ -277,28 +293,30 @@ def test_filter_by_generator_with_map_all_col(): assert ret_data[0] == 0 assert ret_data[1] == 1 + # test with map def test_filter_by_generator_with_map_part_col(): dataset = ds.GeneratorDataset(generator_mc(12), ["col1", "col2"]) - dataset_map = dataset.map( input_columns=["col1"], output_columns=["out1"] , operations=func_map_part) - + dataset_map = dataset.map(input_columns=["col1"], output_columns=["out1"], operations=func_map_part) + dataset_f = dataset_map.filter(input_columns=["out1", 
"col2"], predicate=filter_func_map, num_parallel_workers=4) num_iter = 0 ret_data = [] for item in dataset_f.create_dict_iterator(): - num_iter += 1 - print(item) - ret_data.append(item["out1"]) + num_iter += 1 + print(item) + ret_data.append(item["out1"]) assert num_iter == 3 assert ret_data[0] == 9 assert ret_data[2] == 11 def filter_func_rename(data): - if data> 8: + if data > 8: return True return False + # test with rename before def test_filter_by_generator_with_rename(): dataset = ds.GeneratorDataset(generator_1d, ["data"]) @@ -314,91 +332,101 @@ def test_filter_by_generator_with_rename(): assert ret_data[54] == 63 -#test input_column +# test input_column def filter_func_input_column1(col1, col2): if col1[0] < 8: return True return False + def filter_func_input_column2(col1): if col1[0] < 8: return True return False + def filter_func_input_column3(col1): return True + # test with input_columns def test_filter_by_generator_with_input_column(): dataset = ds.GeneratorDataset(generator_mc(64), ["col1", "col2"]) - dataset_map = dataset.map( input_columns=["col1"], output_columns=["out1"] , operations=func_map_part) - dataset_f1 = dataset_map.filter(input_columns=["out1", "col2"], predicate=filter_func_input_column1, num_parallel_workers=4) + dataset_map = dataset.map(input_columns=["col1"], output_columns=["out1"], operations=func_map_part) + dataset_f1 = dataset_map.filter(input_columns=["out1", "col2"], predicate=filter_func_input_column1, + num_parallel_workers=4) dataset_f2 = dataset_f1.filter(input_columns=["out1"], predicate=filter_func_input_column2, num_parallel_workers=4) - dataset_f3 = dataset_f2.filter(input_columns=["col2"], predicate=filter_func_input_column3, num_parallel_workers=4) + dataset_f3 = dataset_f2.filter(input_columns=["col2"], predicate=filter_func_input_column3, num_parallel_workers=4) dataset_f4 = dataset_f3.filter(predicate=filter_func_input_column1, num_parallel_workers=4) num_iter = 0 ret_data = [] for item in dataset_f4.create_dict_iterator(): - num_iter += 1 - ret_data.append(item["out1"]) + num_iter += 1 + ret_data.append(item["out1"]) assert num_iter == 8 assert ret_data[0] == 0 assert ret_data[7] == 7 -#test kFilterPartial +# test kFilterPartial def generator_mc_p0(maxid=20): for i in range(maxid): - yield (np.array([i ]), np.array([i + 100])) + yield (np.array([i]), np.array([i + 100])) + def generator_mc_p1(maxid=20): for i in range(maxid): - yield (np.array([i + 200 ]), np.array([i + 300])) + yield (np.array([i + 200]), np.array([i + 300])) def filter_func_Partial_0(col1, col2, col3, col4): - filter_data = [0,1,2,3,4, 11] - if col1[0] in filter_data: + filter_data = [0, 1, 2, 3, 4, 11] + if col1[0] in filter_data: return False return True + # test with row_data_buffer > 1 def test_filter_by_generator_Partial0(): ds.config.load('../data/dataset/declient_filter.cfg') - dataset1= ds.GeneratorDataset(source = generator_mc_p0(), column_names = ["col1", "col2"]) - dataset2 = ds.GeneratorDataset(source = generator_mc_p1(), column_names = ["col3", "col4"]) + dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=["col1", "col2"]) + dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=["col3", "col4"]) dataset_zip = ds.zip((dataset1, dataset2)) dataset_f1 = dataset_zip.filter(predicate=filter_func_Partial_0, num_parallel_workers=2) ret = [] for item in dataset_f1.create_dict_iterator(): - ret.append(item["col1"]) + ret.append(item["col1"]) assert ret[0] == 5 assert ret[6] == 12 + # test with row_data_buffer > 1 def 
test_filter_by_generator_Partial1(): ds.config.load('../data/dataset/declient_filter.cfg') - dataset1= ds.GeneratorDataset(source = generator_mc_p0(), column_names = ["col1", "col2"]) - dataset2 = ds.GeneratorDataset(source = generator_mc_p1(), column_names = ["col3", "col4"]) + dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=["col1", "col2"]) + dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=["col3", "col4"]) dataset_zip = ds.zip((dataset1, dataset2)) dataset_f1 = dataset_zip.filter(predicate=filter_func_Partial_0, num_parallel_workers=2) - dataset_map = dataset_f1.map( input_columns=["col1"], output_columns=["out1"] , operations=lambda x1: x1 + 400) + dataset_map = dataset_f1.map(input_columns=["col1"], output_columns=["out1"], operations=lambda x1: x1 + 400) ret = [] for item in dataset_map.create_dict_iterator(): ret.append(item["out1"]) assert ret[0] == 405 assert ret[6] == 412 + # test with row_data_buffer > 1 def test_filter_by_generator_Partial2(): ds.config.load('../data/dataset/declient_filter.cfg') - dataset1= ds.GeneratorDataset(source = generator_mc_p0(), column_names = ["col1", "col2"]) - dataset2 = ds.GeneratorDataset(source = generator_mc_p1(), column_names = ["col3", "col4"]) + dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=["col1", "col2"]) + dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=["col3", "col4"]) - dataset1f = dataset1.filter( input_columns= ["col1"], predicate=lambda x: x not in [3,7,9], num_parallel_workers=2) - dataset2f = dataset2.filter( input_columns= ["col3"], predicate=lambda x: x not in [203,207,209], num_parallel_workers=2) + dataset1f = dataset1.filter(input_columns=["col1"], predicate=lambda x: x not in [3, 7, 9], num_parallel_workers=2) + dataset2f = dataset2.filter(input_columns=["col3"], predicate=lambda x: x not in [203, 207, 209], + num_parallel_workers=2) dataset_zip = ds.zip((dataset1f, dataset2f)) - dataset_map = dataset_zip.map( input_columns=["col1", "col3"], output_columns=["out1", "out3"] , operations=lambda x1,x3: (x1 + 400, x3+500)) + dataset_map = dataset_zip.map(input_columns=["col1", "col3"], output_columns=["out1", "out3"], + operations=lambda x1, x3: (x1 + 400, x3 + 500)) ret1 = [] ret3 = [] for item in dataset_map.create_dict_iterator(): @@ -411,58 +439,65 @@ def test_filter_by_generator_Partial2(): def filter_func_Partial(col1, col2): - if col1[0] % 3 == 0: + if col1[0] % 3 == 0: return True return False + def generator_big(maxid=20): for i in range(maxid): yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])) + # test with row_data_buffer > 1 def test_filter_by_generator_Partial(): ds.config.load('../data/dataset/declient_filter.cfg') - dataset = ds.GeneratorDataset(source = generator_mc(99), column_names = ["col1", "col2"]) + dataset = ds.GeneratorDataset(source=generator_mc(99), column_names=["col1", "col2"]) dataset_s = dataset.shuffle(4) dataset_f1 = dataset_s.filter(input_columns=["col1", "col2"], predicate=filter_func_Partial, num_parallel_workers=1) for item in dataset_f1.create_dict_iterator(): - assert item["col1"] % 3 == 0 + assert item["col1"] % 3 == 0 def filter_func_cifar(col1, col2): - if col2 % 3 == 0: + if col2 % 3 == 0: return True return False + # test with cifar10 def test_filte_case_dataset_cifar10(): DATA_DIR_10 = "../data/dataset/testCifar10Data" ds.config.load('../data/dataset/declient_filter.cfg') - dataset_c = ds.Cifar10Dataset(dataset_dir = DATA_DIR_10, num_samples = 100000, shuffle=False) + dataset_c = 
ds.Cifar10Dataset(dataset_dir=DATA_DIR_10, num_samples=100000, shuffle=False) dataset_f1 = dataset_c.filter(input_columns=["image", "label"], predicate=filter_func_cifar, num_parallel_workers=1) num_iter = 0 for item in dataset_f1.create_dict_iterator(): # in this example, each dictionary has keys "image" and "label" assert item["label"] % 3 == 0 -# column id sort + +# column id sort def generator_sort1(maxid=20): for i in range(maxid): yield (np.array([i]), np.array([i + 100]), np.array([i + 200])) + def generator_sort2(maxid=20): for i in range(maxid): - yield (np.array([i + 300]), np.array([i + 400]), np.array([i + 500])) + yield (np.array([i + 300]), np.array([i + 400]), np.array([i + 500])) def filter_func_part_sort(col1, col2, col3, col4, col5, col6): return True + def filter_func_map_sort(col1, col2, col3): return (col1, col2, col3) + def test_filter_by_generator_with_map_all_sort(): dataset1 = ds.GeneratorDataset(generator_sort1(10), ["col1", "col2", "col3"]) dataset2 = ds.GeneratorDataset(generator_sort2(10), ["col4 ", "col5", "col6"]) @@ -480,7 +515,6 @@ def test_filter_by_generator_with_map_all_sort(): assert ret_data[9]["col6"] == 509 - if __name__ == '__main__': test_diff_predicate_func() test_filte_case_dataset_cifar10() diff --git a/tests/ut/python/dataset/test_generator.py b/tests/ut/python/dataset/test_generator.py index 529788fcaa..78d801a7a1 100644 --- a/tests/ut/python/dataset/test_generator.py +++ b/tests/ut/python/dataset/test_generator.py @@ -525,7 +525,7 @@ def test_sequential_sampler(): def test_random_sampler(): source = [(np.array([x]),) for x in range(64)] - ds1 = ds.GeneratorDataset(source, ["data"], shuffle = True) + ds1 = ds.GeneratorDataset(source, ["data"], shuffle=True) for data in ds1.create_dict_iterator(): # each data is a dictionary pass @@ -533,7 +533,7 @@ def test_random_sampler(): def test_distributed_sampler(): source = [(np.array([x]),) for x in range(64)] for sid in range(8): - ds1 = ds.GeneratorDataset(source, ["data"], shuffle = False, num_shards=8, shard_id=sid) + ds1 = ds.GeneratorDataset(source, ["data"], shuffle=False, num_shards=8, shard_id=sid) i = sid for data in ds1.create_dict_iterator(): # each data is a dictionary golden = np.array([i]) @@ -544,9 +544,9 @@ def test_distributed_sampler(): def test_num_samples(): source = [(np.array([x]),) for x in range(64)] num_samples = 32 - ds1 = ds.GeneratorDataset(source, ["data"], sampler=ds.SequentialSampler(), num_samples = num_samples) - ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(32)], num_samples = num_samples) - ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples = num_samples) + ds1 = ds.GeneratorDataset(source, ["data"], sampler=ds.SequentialSampler(), num_samples=num_samples) + ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(32)], num_samples=num_samples) + ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples=num_samples) count = 0 for _ in ds1.create_dict_iterator(): @@ -567,8 +567,8 @@ def test_num_samples(): def test_num_samples_underflow(): source = [(np.array([x]),) for x in range(64)] num_samples = 256 - ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(64)], num_samples = num_samples) - ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples = num_samples) + ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(64)], num_samples=num_samples) + ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples=num_samples) count = 0 for _ in ds2.create_dict_iterator(): @@ -662,5 
+662,3 @@ if __name__ == "__main__": test_distributed_sampler() test_random_sampler() test_schema() - - diff --git a/tests/ut/python/dataset/test_invert.py b/tests/ut/python/dataset/test_invert.py index a1bfd63431..2faafa292b 100644 --- a/tests/ut/python/dataset/test_invert.py +++ b/tests/ut/python/dataset/test_invert.py @@ -21,6 +21,7 @@ import mindspore.dataset.transforms.vision.py_transforms as F DATA_DIR = "../data/dataset/testImageNetData/train/" + def visualize(image_original, image_invert): """ visualizes the image using DE op and Numpy op @@ -36,65 +37,64 @@ def visualize(image_original, image_invert): plt.title("DE Color Inverted image") plt.show() - + def test_invert(plot=False): """ Test Invert """ logger.info("Test Invert") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # Color Inverted Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # Color Inverted Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transforms_invert = F.ComposeOp([F.Decode(), - F.Resize((224,224)), + F.Resize((224, 224)), F.Invert(), - F.ToTensor()]) - + F.ToTensor()]) + ds_invert = ds.map(input_columns="image", - operations=transforms_invert()) - - ds_invert = ds_invert.batch(512) - - for idx, (image,label) in enumerate(ds_invert): + operations=transforms_invert()) + + ds_invert = ds_invert.batch(512) + + for idx, (image, label) in enumerate(ds_invert): if idx == 0: - images_invert = np.transpose(image, (0, 2,3,1)) + images_invert = np.transpose(image, (0, 2, 3, 1)) else: images_invert = np.append(images_invert, - np.transpose(image, (0, 2,3,1)), + np.transpose(image, (0, 2, 3, 1)), axis=0) - + num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_invert[i]-images_original[i])**2) + mse[i] = np.mean((images_invert[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) - + if plot: visualize(images_original, images_invert) - + if __name__ == "__main__": test_invert(plot=True) - diff --git a/tests/ut/python/dataset/test_iterator.py b/tests/ut/python/dataset/test_iterator.py index 58beecbe16..d61405a352 100644 --- a/tests/ut/python/dataset/test_iterator.py +++ b/tests/ut/python/dataset/test_iterator.py @@ -110,4 +110,4 @@ def test_tree_copy(): if __name__ == '__main__': - test_tree_copy() \ No newline at end of file + test_tree_copy() diff --git a/tests/ut/python/dataset/test_minddataset.py b/tests/ut/python/dataset/test_minddataset.py index 02cad1e6ca..ca252351e7 100644 --- a/tests/ut/python/dataset/test_minddataset.py +++ b/tests/ut/python/dataset/test_minddataset.py @@ -37,7 +37,8 @@ CV2_FILE_NAME = "../data/mindrecord/imagenet2.mindrecord" CV_DIR_NAME = "../data/mindrecord/testImageNetData" NLP_FILE_NAME = 
"../data/mindrecord/aclImdb.mindrecord" NLP_FILE_POS = "../data/mindrecord/testAclImdbData/pos" -NLP_FILE_VOCAB= "../data/mindrecord/testAclImdbData/vocab.txt" +NLP_FILE_VOCAB = "../data/mindrecord/testAclImdbData/vocab.txt" + @pytest.fixture def add_and_remove_cv_file(): @@ -62,6 +63,7 @@ def add_and_remove_cv_file(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + @pytest.fixture def add_and_remove_nlp_file(): """add/remove nlp file""" @@ -79,9 +81,9 @@ def add_and_remove_nlp_file(): "input_ids": {"type": "int64", "shape": [-1]}, "input_mask": {"type": "int64", - "shape": [1, -1]}, + "shape": [1, -1]}, "segment_ids": {"type": "int64", - "shape": [2,-1]} + "shape": [2, -1]} } writer.set_header_size(1 << 14) writer.set_page_size(1 << 15) @@ -94,6 +96,7 @@ def add_and_remove_nlp_file(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_minddataset_writer_tutorial(): """tutorial for cv dataset writer.""" paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) @@ -113,6 +116,7 @@ def test_cv_minddataset_writer_tutorial(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_minddataset_partition_tutorial(add_and_remove_cv_file): """tutorial for cv minddataset.""" columns_list = ["data", "file_name", "label"] @@ -229,6 +233,7 @@ def test_cv_minddataset_blockreader_tutorial(add_and_remove_cv_file): num_iter += 1 assert num_iter == 20 + def test_cv_minddataset_blockreader_some_field_not_in_index_tutorial(add_and_remove_cv_file): """tutorial for cv minddataset.""" columns_list = ["id", "data", "label"] @@ -264,6 +269,7 @@ def test_cv_minddataset_reader_file_list(add_and_remove_cv_file): num_iter += 1 assert num_iter == 10 + def test_cv_minddataset_reader_one_partition(add_and_remove_cv_file): """tutorial for cv minderdataset.""" columns_list = ["data", "file_name", "label"] @@ -280,6 +286,7 @@ def test_cv_minddataset_reader_one_partition(add_and_remove_cv_file): num_iter += 1 assert num_iter < 10 + def test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file): """tutorial for cv minderdataset.""" if os.path.exists(CV1_FILE_NAME): @@ -313,7 +320,8 @@ def test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file): writer.commit() columns_list = ["data", "file_name", "label"] num_readers = 4 - data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(FILES_NUM)] + [CV1_FILE_NAME, CV2_FILE_NAME], columns_list, num_readers) + data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(FILES_NUM)] + [CV1_FILE_NAME, CV2_FILE_NAME], + columns_list, num_readers) assert data_set.get_dataset_size() == 30 num_iter = 0 for item in data_set.create_dict_iterator(): @@ -332,7 +340,8 @@ def test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file): os.remove(CV2_FILE_NAME) if os.path.exists("{}.db".format(CV2_FILE_NAME)): os.remove("{}.db".format(CV2_FILE_NAME)) - + + def test_cv_minddataset_reader_two_dataset_partition(add_and_remove_cv_file): paths = ["{}{}".format(CV1_FILE_NAME, str(x).rjust(1, '0')) for x in range(FILES_NUM)] @@ -352,7 +361,8 @@ def test_cv_minddataset_reader_two_dataset_partition(add_and_remove_cv_file): columns_list = ["data", "file_name", "label"] num_readers = 4 - data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(2)] + [CV1_FILE_NAME + str(x) for x in range(2, 4)], columns_list, num_readers) + data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(2)] + [CV1_FILE_NAME + str(x) for x in range(2, 4)], + columns_list, num_readers) assert data_set.get_dataset_size() < 20 num_iter = 0 for item in 
data_set.create_dict_iterator(): @@ -384,6 +394,7 @@ def test_cv_minddataset_reader_basic_tutorial(add_and_remove_cv_file): num_iter += 1 assert num_iter == 10 + def test_nlp_minddataset_reader_basic_tutorial(add_and_remove_nlp_file): """tutorial for nlp minderdataset.""" num_readers = 4 @@ -515,6 +526,7 @@ def get_data(dir_name): continue return data_list + def get_multi_bytes_data(file_name, bytes_num=3): """ Return raw data of multi-bytes dataset. @@ -549,6 +561,7 @@ def get_multi_bytes_data(file_name, bytes_num=3): continue return data_list + def get_mkv_data(dir_name): """ Return raw data of Vehicle_and_Person dataset. @@ -587,9 +600,10 @@ def get_mkv_data(dir_name): "id": index} data_list.append(data_json) index += 1 - logger.info('{} images are missing'.format(len(file_list)-len(data_list))) + logger.info('{} images are missing'.format(len(file_list) - len(data_list))) return data_list + def get_nlp_data(dir_name, vocab_file, num): """ Return raw data of aclImdb dataset. @@ -635,6 +649,7 @@ def get_nlp_data(dir_name, vocab_file, num): } yield data + def convert_to_uni(text): if isinstance(text, str): return text @@ -642,6 +657,7 @@ def convert_to_uni(text): return text.decode('utf-8', 'ignore') raise Exception("The type %s does not convert!" % type(text)) + def load_vocab(vocab_file): """load vocabulary to translate statement.""" vocab = collections.OrderedDict() @@ -658,15 +674,17 @@ def load_vocab(vocab_file): index += 1 return vocab + def inputs(vectors, maxlen=50): length = len(vectors) if length > maxlen: - return vectors[0:maxlen], [1]*maxlen, [0]*maxlen - input_ = vectors+[0]*(maxlen-length) - mask = [1]*length + [0]*(maxlen-length) - segment = [0]*maxlen + return vectors[0:maxlen], [1] * maxlen, [0] * maxlen + input_ = vectors + [0] * (maxlen - length) + mask = [1] * length + [0] * (maxlen - length) + segment = [0] * maxlen return input_, mask, segment + def test_write_with_multi_bytes_and_array_and_read_by_MindDataset(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 4, @@ -902,6 +920,7 @@ def test_write_with_multi_bytes_and_array_and_read_by_MindDataset(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_write_with_multi_bytes_and_MindDataset(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 43, @@ -1070,6 +1089,7 @@ def test_write_with_multi_bytes_and_MindDataset(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_write_with_multi_array_and_MindDataset(): mindrecord_file_name = "test.mindrecord" data = [{"source_sos_ids": np.array([1, 2, 3, 4, 5], dtype=np.int64), diff --git a/tests/ut/python/dataset/test_minddataset_exception.py b/tests/ut/python/dataset/test_minddataset_exception.py index c9f56eb406..0c6af8f579 100644 --- a/tests/ut/python/dataset/test_minddataset_exception.py +++ b/tests/ut/python/dataset/test_minddataset_exception.py @@ -50,12 +50,13 @@ def create_diff_schema_cv_mindrecord(files_num): writer.write_raw_data(data) writer.commit() + def create_diff_page_size_cv_mindrecord(files_num): """tutorial for cv dataset writer.""" os.remove(CV1_FILE_NAME) if os.path.exists(CV1_FILE_NAME) else None os.remove("{}.db".format(CV1_FILE_NAME)) if os.path.exists("{}.db".format(CV1_FILE_NAME)) else None writer = FileWriter(CV1_FILE_NAME, files_num) - writer.set_page_size(1<< 26) #64MB + writer.set_page_size(1 << 26) # 64MB cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": 
"int32"}, "data": {"type": "bytes"}} data = [{"file_name": "001.jpg", "label": 43, "data": bytes('0xffsafdafda', encoding='utf-8')}] writer.add_schema(cv_schema_json, "img_schema") @@ -63,6 +64,7 @@ def create_diff_page_size_cv_mindrecord(files_num): writer.write_raw_data(data) writer.commit() + def test_cv_lack_json(): """tutorial for cv minderdataset.""" create_cv_mindrecord(1) @@ -123,6 +125,7 @@ def test_cv_minddataset_pk_sample_error_class_column(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_minddataset_pk_sample_exclusive_shuffle(): create_cv_mindrecord(1) columns_list = ["data", "file_name", "label"] @@ -130,13 +133,14 @@ def test_cv_minddataset_pk_sample_exclusive_shuffle(): sampler = ds.PKSampler(2) with pytest.raises(Exception, match="sampler and shuffle cannot be specified at the same time."): data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, - sampler=sampler, shuffle=False) + sampler=sampler, shuffle=False) num_iter = 0 for item in data_set.create_dict_iterator(): num_iter += 1 os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_minddataset_reader_different_schema(): create_cv_mindrecord(1) create_diff_schema_cv_mindrecord(1) @@ -144,7 +148,7 @@ def test_cv_minddataset_reader_different_schema(): num_readers = 4 with pytest.raises(Exception, match="MindRecordOp init failed"): data_set = ds.MindDataset([CV_FILE_NAME, CV1_FILE_NAME], columns_list, - num_readers) + num_readers) num_iter = 0 for item in data_set.create_dict_iterator(): num_iter += 1 @@ -153,6 +157,7 @@ def test_cv_minddataset_reader_different_schema(): os.remove(CV1_FILE_NAME) os.remove("{}.db".format(CV1_FILE_NAME)) + def test_cv_minddataset_reader_different_page_size(): create_cv_mindrecord(1) create_diff_page_size_cv_mindrecord(1) @@ -160,7 +165,7 @@ def test_cv_minddataset_reader_different_page_size(): num_readers = 4 with pytest.raises(Exception, match="MindRecordOp init failed"): data_set = ds.MindDataset([CV_FILE_NAME, CV1_FILE_NAME], columns_list, - num_readers) + num_readers) num_iter = 0 for item in data_set.create_dict_iterator(): num_iter += 1 @@ -169,6 +174,7 @@ def test_cv_minddataset_reader_different_page_size(): os.remove(CV1_FILE_NAME) os.remove("{}.db".format(CV1_FILE_NAME)) + def test_minddataset_invalidate_num_shards(): create_cv_mindrecord(1) columns_list = ["data", "label"] @@ -181,6 +187,7 @@ def test_minddataset_invalidate_num_shards(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_minddataset_invalidate_shard_id(): create_cv_mindrecord(1) columns_list = ["data", "label"] @@ -193,6 +200,7 @@ def test_minddataset_invalidate_shard_id(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_minddataset_shard_id_bigger_than_num_shard(): create_cv_mindrecord(1) columns_list = ["data", "label"] diff --git a/tests/ut/python/dataset/test_minddataset_multi_images.py b/tests/ut/python/dataset/test_minddataset_multi_images.py index 0a48c564c6..80b94940ab 100644 --- a/tests/ut/python/dataset/test_minddataset_multi_images.py +++ b/tests/ut/python/dataset/test_minddataset_multi_images.py @@ -42,6 +42,7 @@ def test_cv_minddataset_reader_two_png_tutorial(): num_iter += 1 assert num_iter == 5 + def test_cv_minddataset_reader_two_png_tutorial_just_image2(): """tutorial for cv minderdataset.""" columns_list = ["img_data", "label_data"] @@ -58,4 +59,3 @@ def test_cv_minddataset_reader_two_png_tutorial_just_image2(): logger.info("-------------- item[label_data][500:520] is {} 
-------".format(item["label_data"][500:520])) num_iter += 1 assert num_iter == 5 - diff --git a/tests/ut/python/dataset/test_minddataset_multi_images_and_ndarray.py b/tests/ut/python/dataset/test_minddataset_multi_images_and_ndarray.py index a68557e203..47215dc0a7 100644 --- a/tests/ut/python/dataset/test_minddataset_multi_images_and_ndarray.py +++ b/tests/ut/python/dataset/test_minddataset_multi_images_and_ndarray.py @@ -26,15 +26,16 @@ from mindspore.mindrecord import FileWriter FILES_NUM = 1 CV_FILE_NAME = "./complex.mindrecord" + def test_cv_minddataset_reader_multi_image_and_ndarray_tutorial(): writer = FileWriter(CV_FILE_NAME, FILES_NUM) - cv_schema_json={"id": {"type": "int32"}, - "image_0": {"type": "bytes"}, - "image_2": {"type": "bytes"}, - "image_3": {"type": "bytes"}, - "image_4": {"type": "bytes"}, - "input_mask": {"type": "int32", "shape": [-1]}, - "segments": {"type": "float32", "shape": [2, 3]}} + cv_schema_json = {"id": {"type": "int32"}, + "image_0": {"type": "bytes"}, + "image_2": {"type": "bytes"}, + "image_3": {"type": "bytes"}, + "image_4": {"type": "bytes"}, + "input_mask": {"type": "int32", "shape": [-1]}, + "segments": {"type": "float32", "shape": [2, 3]}} writer.add_schema(cv_schema_json, "two_images_schema") with open("../data/mindrecord/testImageNetData/images/image_00010.jpg", "rb") as file_reader: img_data = file_reader.read() @@ -67,7 +68,7 @@ def test_cv_minddataset_reader_multi_image_and_ndarray_tutorial(): assert item["image_3"].dtype == np.uint8 assert item["image_4"].dtype == np.uint8 assert item["id"].dtype == np.int32 - assert item["input_mask"].shape == (5, ) + assert item["input_mask"].shape == (5,) assert item["input_mask"].dtype == np.int32 assert item["segments"].shape == (2, 3) assert item["segments"].dtype == np.float32 diff --git a/tests/ut/python/dataset/test_minddataset_sampler.py b/tests/ut/python/dataset/test_minddataset_sampler.py index 5656a08ae4..71fda5682e 100644 --- a/tests/ut/python/dataset/test_minddataset_sampler.py +++ b/tests/ut/python/dataset/test_minddataset_sampler.py @@ -60,6 +60,7 @@ def add_and_remove_cv_file(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_minddataset_pk_sample_no_column(add_and_remove_cv_file): """tutorial for cv minderdataset.""" num_readers = 4 @@ -75,6 +76,8 @@ def test_cv_minddataset_pk_sample_no_column(add_and_remove_cv_file): {}------------------------".format("".join([chr(x) for x in item["file_name"]]))) logger.info("-------------- item[label]: {} ----------------------------".format(item["label"])) num_iter += 1 + + def test_cv_minddataset_pk_sample_basic(add_and_remove_cv_file): """tutorial for cv minderdataset.""" columns_list = ["data", "file_name", "label"] @@ -92,7 +95,7 @@ def test_cv_minddataset_pk_sample_basic(add_and_remove_cv_file): logger.info("-------------- item[label]: {} ----------------------------".format(item["label"])) num_iter += 1 - + def test_cv_minddataset_pk_sample_shuffle(add_and_remove_cv_file): """tutorial for cv minderdataset.""" columns_list = ["data", "file_name", "label"] @@ -159,7 +162,7 @@ def test_cv_minddataset_subset_random_sample_replica(add_and_remove_cv_file): sampler = ds.SubsetRandomSampler(indices) data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, sampler=sampler) - assert data_set.get_dataset_size() == 6 + assert data_set.get_dataset_size() == 6 num_iter = 0 for item in data_set.create_dict_iterator(): logger.info( @@ -205,7 +208,7 @@ def 
test_cv_minddataset_subset_random_sample_out_of_range(add_and_remove_cv_file sampler = ds.SubsetRandomSampler(indices) data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, sampler=sampler) - assert data_set.get_dataset_size() == 5 + assert data_set.get_dataset_size() == 5 num_iter = 0 for item in data_set.create_dict_iterator(): logger.info( diff --git a/tests/ut/python/dataset/test_mixup_label_smoothing.py b/tests/ut/python/dataset/test_mixup_label_smoothing.py index 83c1cab738..69065962df 100644 --- a/tests/ut/python/dataset/test_mixup_label_smoothing.py +++ b/tests/ut/python/dataset/test_mixup_label_smoothing.py @@ -41,14 +41,14 @@ def test_one_hot_op(): transform_label = py_vision.ComposeOp(transforms) dataset = dataset.map(input_columns=["label"], operations=transform_label()) - golden_label = np.ones(num_classes)*epsilon_para/num_classes - golden_label[1] = 1 - epsilon_para/num_classes + golden_label = np.ones(num_classes) * epsilon_para / num_classes + golden_label[1] = 1 - epsilon_para / num_classes for data in dataset.create_dict_iterator(): label = data["label"] logger.info("label is {}".format(label)) logger.info("golden_label is {}".format(golden_label)) - assert(label.all() == golden_label.all()) + assert (label.all() == golden_label.all()) logger.info("====test one hot op ok====") @@ -67,7 +67,7 @@ def test_mix_up_single(): num_classes = 10 decode_op = c_vision.Decode() resize_op = c_vision.Resize((resize_height, resize_width), c_vision.Inter.LINEAR) - one_hot_encode = c.OneHot(num_classes) # num_classes is input argument + one_hot_encode = c.OneHot(num_classes) # num_classes is input argument ds1 = ds1.map(input_columns=["image"], operations=decode_op) ds1 = ds1.map(input_columns=["image"], operations=resize_op) @@ -93,10 +93,10 @@ def test_mix_up_single(): logger.info("label2 is {}".format(label2)) lam = np.abs(label - label2) - for index in range(batch_size-1): + for index in range(batch_size - 1): if np.square(lam[index]).mean() != 0: - lam_value = 1 - np.sum(lam[index])/2 - img_golden = lam_value * image2[index] + (1-lam_value)*image2[index+1] + lam_value = 1 - np.sum(lam[index]) / 2 + img_golden = lam_value * image2[index] + (1 - lam_value) * image2[index + 1] assert image1[index].all() == img_golden.all() logger.info("====test single batch mixup ok====") @@ -116,7 +116,7 @@ def test_mix_up_multi(): num_classes = 3 decode_op = c_vision.Decode() resize_op = c_vision.Resize((resize_height, resize_width), c_vision.Inter.LINEAR) - one_hot_encode = c.OneHot(num_classes) # num_classes is input argument + one_hot_encode = c.OneHot(num_classes) # num_classes is input argument ds1 = ds1.map(input_columns=["image"], operations=decode_op) ds1 = ds1.map(input_columns=["image"], operations=resize_op) @@ -150,8 +150,8 @@ def test_mix_up_multi(): logger.info("lam value in multi: {}".format(lam)) for index in range(batch_size): if np.square(lam[index]).mean() != 0: - lam_value = 1 - np.sum(lam[index])/2 - img_golden = lam_value * image2[index] + (1-lam_value)*batch1_image1[index] + lam_value = 1 - np.sum(lam[index]) / 2 + img_golden = lam_value * image2[index] + (1 - lam_value) * batch1_image1[index] assert image1[index].all() == img_golden.all() logger.info("====test several batch mixup ok====") break diff --git a/tests/ut/python/dataset/test_pad.py b/tests/ut/python/dataset/test_pad.py index 449a83ae14..6d2f92d1e3 100644 --- a/tests/ut/python/dataset/test_pad.py +++ b/tests/ut/python/dataset/test_pad.py @@ -27,6 +27,7 @@ from util import diff_mse DATA_DIR = 
["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" + def test_pad_op(): """ Test Pad op @@ -69,11 +70,12 @@ def test_pad_op(): assert mse < 0.01 -def test_pad_grayscale(): +def test_pad_grayscale(): """ Tests that the pad works for grayscale images """ - def channel_swap(image): + + def channel_swap(image): """ Py func hack for our pytransforms to work with c transforms """ @@ -81,7 +83,7 @@ def test_pad_grayscale(): transforms = [ py_vision.Decode(), - py_vision.Grayscale(1), + py_vision.Grayscale(1), py_vision.ToTensor(), (lambda image: channel_swap(image)) ] diff --git a/tests/ut/python/dataset/test_project.py b/tests/ut/python/dataset/test_project.py index 39734b208d..554476fd0f 100644 --- a/tests/ut/python/dataset/test_project.py +++ b/tests/ut/python/dataset/test_project.py @@ -17,7 +17,6 @@ import mindspore.dataset.transforms.c_transforms as C from mindspore.common import dtype as mstype from util import save_and_check_tuple - DATA_DIR_TF = ["../data/dataset/testTFTestAllTypes/test.data"] SCHEMA_DIR_TF = "../data/dataset/testTFTestAllTypes/datasetSchema.json" GENERATE_GOLDEN = False diff --git a/tests/ut/python/dataset/test_pyfunc.py b/tests/ut/python/dataset/test_pyfunc.py index f77e5bd463..7e41f1b7fd 100644 --- a/tests/ut/python/dataset/test_pyfunc.py +++ b/tests/ut/python/dataset/test_pyfunc.py @@ -54,7 +54,7 @@ def test_case_1(): # apply dataset operations data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns=col, output_columns=["out0", "out1"], operations=(lambda x: (x, x + x)), - columns_order=["out0", "out1"]) + columns_order=["out0", "out1"]) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -78,7 +78,7 @@ def test_case_2(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns=col, output_columns="out", operations=(lambda x, y: x + y), - columns_order=["out"]) + columns_order=["out"]) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -100,7 +100,7 @@ def test_case_3(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns=col, output_columns=["out0", "out1", "out2"], - operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=["out0", "out1", "out2"]) + operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=["out0", "out1", "out2"]) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -126,7 +126,7 @@ def test_case_4(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns=col, output_columns=["out0", "out1", "out2"], num_parallel_workers=4, - operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=["out0", "out1", "out2"]) + operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=["out0", "out1", "out2"]) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -172,7 +172,7 @@ def test_case_6(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns="col0", output_columns="out", - operations=[(lambda x: x + x), (lambda x: x + x)]) + operations=[(lambda x: x + x), (lambda x: x + x)]) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -192,7 +192,7 @@ def test_case_7(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = data1.map(input_columns="col0", output_columns="out", operations=(lambda x: x + x), - 
num_parallel_workers=4, python_multiprocessing = True) + num_parallel_workers=4, python_multiprocessing=True) i = 0 for item in data1.create_dict_iterator(): # each data is a dictionary @@ -259,7 +259,7 @@ def test_pyfunc_execption(): with pytest.raises(RuntimeError) as info: # apply dataset operations data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) - data1 = data1.map(input_columns="col0", output_columns="out", operations= pyfunc, + data1 = data1.map(input_columns="col0", output_columns="out", operations=pyfunc, num_parallel_workers=4) for _ in data1: pass @@ -275,8 +275,8 @@ def skip_test_pyfunc_execption_multiprocess(): with pytest.raises(RuntimeError) as info: # apply dataset operations data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) - data1 = data1.map(input_columns="col0", output_columns="out", operations= pyfunc, - num_parallel_workers=4, python_multiprocessing = True) + data1 = data1.map(input_columns="col0", output_columns="out", operations=pyfunc, + num_parallel_workers=4, python_multiprocessing=True) for _ in data1: pass assert "MP Pyfunc Throw" in str(info.value) diff --git a/tests/ut/python/dataset/test_random_color.py b/tests/ut/python/dataset/test_random_color.py index 9472b7e35a..90fc881b39 100644 --- a/tests/ut/python/dataset/test_random_color.py +++ b/tests/ut/python/dataset/test_random_color.py @@ -37,66 +37,66 @@ def visualize(image_original, image_random_color): plt.title("DE Random Color image") plt.show() - -def test_random_color(degrees=(0.1,1.9), plot=False): + +def test_random_color(degrees=(0.1, 1.9), plot=False): """ Test RandomColor """ logger.info("Test RandomColor") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # Random Color Adjusted Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # Random Color Adjusted Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transforms_random_color = F.ComposeOp([F.Decode(), - F.Resize((224,224)), + F.Resize((224, 224)), F.RandomColor(degrees=degrees), - F.ToTensor()]) - + F.ToTensor()]) + ds_random_color = ds.map(input_columns="image", operations=transforms_random_color()) - - ds_random_color = ds_random_color.batch(512) - - for idx, (image,label) in enumerate(ds_random_color): + + ds_random_color = ds_random_color.batch(512) + + for idx, (image, label) in enumerate(ds_random_color): if idx == 0: - images_random_color = np.transpose(image, (0, 2,3,1)) + images_random_color = np.transpose(image, (0, 2, 3, 1)) else: images_random_color = np.append(images_random_color, - np.transpose(image, (0, 2,3,1)), + np.transpose(image, (0, 2, 3, 1)), axis=0) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_random_color[i]-images_original[i])**2) + mse[i] = 
np.mean((images_random_color[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) - + if plot: visualize(images_original, images_random_color) - + if __name__ == "__main__": test_random_color() test_random_color(plot=True) - test_random_color(degrees=(0.5,1.5), plot=True) + test_random_color(degrees=(0.5, 1.5), plot=True) diff --git a/tests/ut/python/dataset/test_random_color_adjust.py b/tests/ut/python/dataset/test_random_color_adjust.py index e03f8dcc40..39ebf2f1a3 100644 --- a/tests/ut/python/dataset/test_random_color_adjust.py +++ b/tests/ut/python/dataset/test_random_color_adjust.py @@ -253,11 +253,12 @@ def test_random_color_adjust_op_hue(plot=False): visualize(c_image, mse, py_image) -def test_random_color_adjust_grayscale(): +def test_random_color_adjust_grayscale(): """ Tests that the random color adjust works for grayscale images """ - def channel_swap(image): + + def channel_swap(image): """ Py func hack for our pytransforms to work with c transforms """ @@ -265,7 +266,7 @@ def test_random_color_adjust_grayscale(): transforms = [ py_vision.Decode(), - py_vision.Grayscale(1), + py_vision.Grayscale(1), py_vision.ToTensor(), (lambda image: channel_swap(image)) ] @@ -276,7 +277,7 @@ def test_random_color_adjust_grayscale(): # if input is grayscale, the output dimensions should be single channel, the following should fail random_adjust_op = c_vision.RandomColorAdjust((1, 1), (1, 1), (1, 1), (0.2, 0.2)) - try: + try: data1 = data1.map(input_columns=["image"], operations=random_adjust_op) dataset_shape_1 = [] for item1 in data1.create_dict_iterator(): diff --git a/tests/ut/python/dataset/test_random_crop.py b/tests/ut/python/dataset/test_random_crop.py index 2ef3a17dcc..55dd7ffb07 100644 --- a/tests/ut/python/dataset/test_random_crop.py +++ b/tests/ut/python/dataset/test_random_crop.py @@ -67,4 +67,3 @@ def test_random_crop_op(): if __name__ == "__main__": test_random_crop_op() - diff --git a/tests/ut/python/dataset/test_random_dataset.py b/tests/ut/python/dataset/test_random_dataset.py index 16c43ea971..b0fdffd4e2 100644 --- a/tests/ut/python/dataset/test_random_dataset.py +++ b/tests/ut/python/dataset/test_random_dataset.py @@ -17,6 +17,7 @@ import mindspore.dataset as ds from mindspore import log as logger from pathlib import Path + # just a basic test with parallel random data op def test_randomdataset_basic1(): logger.info("Test randomdataset basic") @@ -37,14 +38,16 @@ def test_randomdataset_basic1(): num_iter += 1 logger.info("Number of data in ds1: ", num_iter) - assert(num_iter == 200) + assert (num_iter == 200) + # Another simple test def test_randomdataset_basic2(): logger.info("Test randomdataset basic 2") schema = ds.Schema() - schema.add_column('image', de_type=mstype.uint8, shape=[640,480,3]) # 921600 bytes (a bit less than 1 MB per image) + schema.add_column('image', de_type=mstype.uint8, + shape=[640, 480, 3]) # 921600 bytes (a bit less than 1 MB per image) schema.add_column('label', de_type=mstype.uint8, shape=[1]) # Make up about 10 samples @@ -56,16 +59,15 @@ def test_randomdataset_basic2(): num_iter = 0 for data in ds1.create_dict_iterator(): # each data is a dictionary # in this example, each dictionary has keys "image" and "label" - #logger.info(data["image"]) + # logger.info(data["image"]) logger.info("printing the label: {}".format(data["label"])) num_iter += 1 logger.info("Number of data in ds1: ", num_iter) - assert(num_iter == 40) + assert (num_iter == 40) if __name__ == '__main__': test_randomdataset_basic1() 
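For reference, the per-sample comparison that test_random_color runs (and that the sharpness and uniform-augment tests below repeat) reduces to a small numpy routine. A minimal sketch, assuming NHWC float batches; batch_mse is an illustrative name, not part of the test suite:

import numpy as np

def batch_mse(images_a, images_b):
    # Per-sample mean squared error over H, W and C, then averaged across the batch.
    per_sample = np.mean((images_a - images_b) ** 2, axis=(1, 2, 3))
    return float(per_sample.mean())

rng = np.random.default_rng(0)
batch = rng.random((4, 224, 224, 3), dtype=np.float32)
print(batch_mse(batch, batch + 0.1))  # a uniform 0.1 shift gives an MSE of about 0.01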
test_randomdataset_basic2() logger.info('test_randomdataset_basic Ended.\n') - diff --git a/tests/ut/python/dataset/test_random_sharpness.py b/tests/ut/python/dataset/test_random_sharpness.py index 949a658597..880d0acb1e 100644 --- a/tests/ut/python/dataset/test_random_sharpness.py +++ b/tests/ut/python/dataset/test_random_sharpness.py @@ -37,66 +37,66 @@ def visualize(image_original, image_random_sharpness): plt.title("DE Random Sharpness image") plt.show() - -def test_random_sharpness(degrees=(0.1,1.9), plot=False): + +def test_random_sharpness(degrees=(0.1, 1.9), plot=False): """ Test RandomSharpness """ logger.info("Test RandomSharpness") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # Random Sharpness Adjusted Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # Random Sharpness Adjusted Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transforms_random_sharpness = F.ComposeOp([F.Decode(), - F.Resize((224,224)), + F.Resize((224, 224)), F.RandomSharpness(degrees=degrees), - F.ToTensor()]) - + F.ToTensor()]) + ds_random_sharpness = ds.map(input_columns="image", operations=transforms_random_sharpness()) - - ds_random_sharpness = ds_random_sharpness.batch(512) - - for idx, (image,label) in enumerate(ds_random_sharpness): + + ds_random_sharpness = ds_random_sharpness.batch(512) + + for idx, (image, label) in enumerate(ds_random_sharpness): if idx == 0: - images_random_sharpness = np.transpose(image, (0, 2,3,1)) + images_random_sharpness = np.transpose(image, (0, 2, 3, 1)) else: images_random_sharpness = np.append(images_random_sharpness, - np.transpose(image, (0, 2,3,1)), + np.transpose(image, (0, 2, 3, 1)), axis=0) - + num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_random_sharpness[i]-images_original[i])**2) + mse[i] = np.mean((images_random_sharpness[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize(images_original, images_random_sharpness) - + if __name__ == "__main__": test_random_sharpness() test_random_sharpness(plot=True) - test_random_sharpness(degrees=(0.5,1.5), plot=True) + test_random_sharpness(degrees=(0.5, 1.5), plot=True) diff --git a/tests/ut/python/dataset/test_rename.py b/tests/ut/python/dataset/test_rename.py index 5e7b28ed7e..cc802845b9 100644 --- a/tests/ut/python/dataset/test_rename.py +++ b/tests/ut/python/dataset/test_rename.py @@ -35,9 +35,9 @@ def test_rename(): for i, item in enumerate(data.create_dict_iterator()): logger.info("item[mask] is {}".format(item["masks"])) - np.testing.assert_equal (item["masks"], item["input_ids"]) + np.testing.assert_equal(item["masks"], item["input_ids"]) logger.info("item[seg_ids] is {}".format(item["seg_ids"])) - 
np.testing.assert_equal (item["segment_ids"], item["seg_ids"]) + np.testing.assert_equal(item["segment_ids"], item["seg_ids"]) # need to consume the data in the buffer num_iter += 1 logger.info("Number of data in data: {}".format(num_iter)) diff --git a/tests/ut/python/dataset/test_rgb_hsv.py b/tests/ut/python/dataset/test_rgb_hsv.py index 14ab9ddc2d..20d501dc47 100644 --- a/tests/ut/python/dataset/test_rgb_hsv.py +++ b/tests/ut/python/dataset/test_rgb_hsv.py @@ -27,6 +27,7 @@ import mindspore.dataset.transforms.vision.py_transforms_util as util DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" + def generate_numpy_random_rgb(shape): # Only generate floating points that are fractions like n / 256, since they # are RGB pixels. Some low-precision floating point types in this test can't @@ -167,4 +168,3 @@ if __name__ == "__main__": test_rgb_hsv_chw() test_rgb_hsv_batch_chw() test_rgb_hsv_pipeline() - diff --git a/tests/ut/python/dataset/test_sampler.py b/tests/ut/python/dataset/test_sampler.py index 4efca6f818..93518b3f9c 100644 --- a/tests/ut/python/dataset/test_sampler.py +++ b/tests/ut/python/dataset/test_sampler.py @@ -146,7 +146,7 @@ def test_python_sampler(): for i in range(99, -1, -1): yield i - data1 = ds.GeneratorDataset([(np.array(i),) for i in range(100)], ["data"], sampler = MySampler()) + data1 = ds.GeneratorDataset([(np.array(i),) for i in range(100)], ["data"], sampler=MySampler()) i = 99 for data in data1: assert data[0] == (np.array(i),) @@ -168,4 +168,4 @@ if __name__ == '__main__': test_random_sampler(True) test_random_sampler_multi_iter(True) test_sampler_py_api() - test_python_sampler() \ No newline at end of file + test_python_sampler() diff --git a/tests/ut/python/dataset/test_serdes_dataset.py b/tests/ut/python/dataset/test_serdes_dataset.py index 0a6f86974b..62e8649ed0 100644 --- a/tests/ut/python/dataset/test_serdes_dataset.py +++ b/tests/ut/python/dataset/test_serdes_dataset.py @@ -28,6 +28,7 @@ import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore.dataset.transforms.vision import Inter from mindspore import log as logger + def test_imagefolder(remove_json_files=True): """ Test simulating resnet50 dataset pipeline. 
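test_python_sampler above only shows the sampler's generator body; the full pattern is a small class whose __iter__ yields source indices. A hedged sketch, assuming ds.Sampler is the base class for user-defined samplers (ReverseSampler is an illustrative stand-in for the test's MySampler):

import numpy as np
import mindspore.dataset as ds

class ReverseSampler(ds.Sampler):
    def __iter__(self):
        # Visit the 100 source rows from index 99 down to 0.
        for i in range(99, -1, -1):
            yield i

data1 = ds.GeneratorDataset([(np.array(i),) for i in range(100)], ["data"],
                            sampler=ReverseSampler())
# The first row produced is element 99, then 98, and so on.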
@@ -140,9 +141,9 @@ def test_zip_dataset(remove_json_files=True): data2 = ds.TFRecordDataset(files, schema=schema_file, shuffle=ds.Shuffle.FILES) data2 = data2.shuffle(10000) data2 = data2.rename(input_columns=["col_sint16", "col_sint32", "col_sint64", "col_float", - "col_1d", "col_2d", "col_3d", "col_binary"], - output_columns=["column_sint16", "column_sint32", "column_sint64", "column_float", - "column_1d", "column_2d", "column_3d", "column_binary"]) + "col_1d", "col_2d", "col_3d", "col_binary"], + output_columns=["column_sint16", "column_sint32", "column_sint64", "column_float", + "column_1d", "column_2d", "column_3d", "column_binary"]) data3 = ds.zip((data1, data2)) ds.serialize(data3, "zip_dataset_pipeline.json") assert (validate_jsonfile("zip_dataset_pipeline.json") is True) @@ -169,6 +170,7 @@ def test_zip_dataset(remove_json_files=True): if remove_json_files: delete_json_files() + def test_random_crop(): logger.info("test_random_crop") DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] @@ -198,6 +200,7 @@ def test_random_crop(): assert (np.array_equal(item1['image'], item1_1['image'])) image2 = item2["image"] + def validate_jsonfile(filepath): try: file_exist = os.path.exists(filepath) @@ -216,10 +219,12 @@ def delete_json_files(): except IOError: logger.info("Error while deleting: {}".format(f)) + # Test save load minddataset from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME, FILES_NUM, \ FileWriter, Inter + def test_minddataset(add_and_remove_cv_file): """tutorial for cv minderdataset.""" columns_list = ["data", "file_name", "label"] diff --git a/tests/ut/python/dataset/test_shuffle.py b/tests/ut/python/dataset/test_shuffle.py index 359bdea648..f1b69f928a 100644 --- a/tests/ut/python/dataset/test_shuffle.py +++ b/tests/ut/python/dataset/test_shuffle.py @@ -136,7 +136,7 @@ def test_shuffle_06(): data2 = data2.shuffle(buffer_size=buffer_size) for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): - np.testing.assert_equal (item1, item2) + np.testing.assert_equal(item1, item2) def test_shuffle_exception_01(): diff --git a/tests/ut/python/dataset/test_skip.py b/tests/ut/python/dataset/test_skip.py index ccbf40a55b..f257426364 100644 --- a/tests/ut/python/dataset/test_skip.py +++ b/tests/ut/python/dataset/test_skip.py @@ -46,7 +46,7 @@ def generator_md(): create a dataset with [0, 1, 2, 3, 4] """ for i in range(5): - yield (np.array([i]), ) + yield (np.array([i]),) def test_generator_skip(): @@ -138,6 +138,7 @@ def test_skip_repeat_3(): assert len(buf) == 6 assert buf == [3, 4, 3, 4, 3, 4] + def test_skip_take_1(): ds1 = ds.GeneratorDataset(generator_md, ["data"]) @@ -153,6 +154,7 @@ def test_skip_take_1(): assert len(buf) == 2 assert buf == [2, 3] + def test_skip_take_2(): ds1 = ds.GeneratorDataset(generator_md, ["data"]) @@ -171,7 +173,8 @@ def test_skip_take_2(): def generator_1d(): for i in range(64): - yield (np.array([i]), ) + yield (np.array([i]),) + def test_skip_filter_1(): dataset = ds.GeneratorDataset(generator_1d, ['data']) @@ -183,6 +186,7 @@ def test_skip_filter_1(): buf.append(item[0][0]) assert buf == [5, 6, 7, 8, 9, 10] + def test_skip_filter_2(): dataset = ds.GeneratorDataset(generator_1d, ['data']) dataset = dataset.filter(predicate=lambda data: data < 11, num_parallel_workers=4) diff --git a/tests/ut/python/dataset/test_sync_wait.py b/tests/ut/python/dataset/test_sync_wait.py index 7e9fade39d..552782a849 100644 --- a/tests/ut/python/dataset/test_sync_wait.py +++ 
b/tests/ut/python/dataset/test_sync_wait.py @@ -27,10 +27,10 @@ def gen(): class Augment: def __init__(self, loss): self.loss = loss - + def preprocess(self, input): return input - + def update(self, data): self.loss = data["loss"] @@ -75,7 +75,7 @@ def test_simple_shuffle_sync(): count = 0 for data in dataset.create_dict_iterator(): count += 1 - #time.sleep(0.5) + # time.sleep(0.5) data = {"loss": count} dataset.sync_update(condition_name="policy", data=data) @@ -94,9 +94,9 @@ def test_two_sync(): dataset = dataset.sync_wait(condition_name="every batch", callback=aug.update) dataset = dataset.map(input_columns=["input"], operations=[aug.preprocess]) - + dataset = dataset.sync_wait(num_batch=2, condition_name="every 2 batches") - + dataset = dataset.batch(batch_size) count = 0 @@ -131,7 +131,7 @@ def test_sync_epoch(): dataset.sync_update(condition_name="policy", data=data) -def test_multiple_iterators(): +def test_multiple_iterators(): """ Test sync wait with multiple iterators: will start multiple """ @@ -152,7 +152,7 @@ def test_multiple_iterators(): dataset2 = dataset2.batch(batch_size, drop_remainder=True) for item1, item2 in zip(dataset.create_dict_iterator(), dataset2.create_dict_iterator()): - assert (item1["input"][0] == item2["input"][0]) + assert (item1["input"][0] == item2["input"][0]) data1 = {"loss": item1["input"][0]} data2 = {"loss": item2["input"][0]} dataset.sync_update(condition_name="policy", data=data1) @@ -192,16 +192,16 @@ def test_sync_exception_02(): aug = Augment(0) # notice that with our design, we need to have step_size = shuffle size dataset = dataset.sync_wait(condition_name="every batch", callback=aug.update) - + dataset = dataset.map(input_columns=["input"], operations=[aug.preprocess]) - + try: dataset = dataset.sync_wait(num_batch=2, condition_name="every batch") except BaseException as e: assert "name" in str(e) dataset = dataset.batch(batch_size) - - + + if __name__ == "__main__": test_simple_sync_wait() test_simple_shuffle_sync() diff --git a/tests/ut/python/dataset/test_take.py b/tests/ut/python/dataset/test_take.py index 64efc7a785..cd0a443439 100644 --- a/tests/ut/python/dataset/test_take.py +++ b/tests/ut/python/dataset/test_take.py @@ -126,7 +126,7 @@ def test_take_06(): """ logger.info("test_take_06") data1 = ds.GeneratorDataset(generator, ["data"]) - + data1 = data1.repeat(2) data1 = data1.take(4) @@ -143,7 +143,7 @@ def test_take_07(): """ logger.info("test_take_07") data1 = ds.GeneratorDataset(generator, ["data"]) - + data1 = data1.take(2) data1 = data1.batch(2) assert sum([1 for _ in data1]) == 1 @@ -167,7 +167,7 @@ def test_take_09(): """ logger.info("test_take_09") data1 = ds.GeneratorDataset(generator, ["data"]) - + data1 = data1.repeat(2) data1 = data1.take(-1) @@ -360,4 +360,4 @@ if __name__ == '__main__': test_take_16() test_take_17() test_take_18() - logger.info('== test take operation finished ==') \ No newline at end of file + logger.info('== test take operation finished ==') diff --git a/tests/ut/python/dataset/test_tensor_string.py b/tests/ut/python/dataset/test_tensor_string.py index 7fd6300865..3fe21ba570 100644 --- a/tests/ut/python/dataset/test_tensor_string.py +++ b/tests/ut/python/dataset/test_tensor_string.py @@ -62,4 +62,4 @@ def test_batching_strings(): if __name__ == '__main__': test_generator() test_basic() - test_batching_strings() \ No newline at end of file + test_batching_strings() diff --git a/tests/ut/python/dataset/test_tfreader_op.py b/tests/ut/python/dataset/test_tfreader_op.py index 0f60b404a2..67d54385e9 100644 
--- a/tests/ut/python/dataset/test_tfreader_op.py +++ b/tests/ut/python/dataset/test_tfreader_op.py @@ -20,7 +20,6 @@ import mindspore.dataset as ds from mindspore import log as logger import pytest - FILES = ["../data/dataset/testTFTestAllTypes/test.data"] DATASET_ROOT = "../data/dataset/testTFTestAllTypes/" SCHEMA_FILE = "../data/dataset/testTFTestAllTypes/datasetSchema.json" @@ -233,6 +232,7 @@ def test_tf_record_schema_columns_list(): a = row["col_sint32"] assert "col_sint32" in str(info.value) + def test_case_invalid_files(): valid_file = "../data/dataset/testTFTestAllTypes/test.data" invalid_file = "../data/dataset/testTFTestAllTypes/invalidFile.txt" @@ -259,6 +259,7 @@ def test_case_invalid_files(): assert SCHEMA_FILE not in str(info.value) assert nonexistent_file in str(info.value) + if __name__ == '__main__': test_case_tf_shape() test_case_tf_read_all_dataset() @@ -271,7 +272,7 @@ if __name__ == '__main__': test_tf_files() test_tf_record_schema() test_tf_record_shuffle() - #test_tf_record_shard() + # test_tf_record_shard() test_tf_shard_equal_rows() test_case_tf_file_no_schema_columns_list() test_tf_record_schema_columns_list() diff --git a/tests/ut/python/dataset/test_type_cast.py b/tests/ut/python/dataset/test_type_cast.py index 05db39da45..95a935bf03 100644 --- a/tests/ut/python/dataset/test_type_cast.py +++ b/tests/ut/python/dataset/test_type_cast.py @@ -79,7 +79,7 @@ def test_type_cast_string(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() - type_cast_op = data_util.TypeCast(mstype.float16 ) + type_cast_op = data_util.TypeCast(mstype.float16) ctrans = [decode_op, type_cast_op diff --git a/tests/ut/python/dataset/test_uniform_augment.py b/tests/ut/python/dataset/test_uniform_augment.py index 98c22fb3cb..adda8560b5 100644 --- a/tests/ut/python/dataset/test_uniform_augment.py +++ b/tests/ut/python/dataset/test_uniform_augment.py @@ -22,6 +22,7 @@ import mindspore.dataset.transforms.vision.c_transforms as C DATA_DIR = "../data/dataset/testImageNetData/train/" + def visualize(image_original, image_ua): """ visualizes the image using DE op and Numpy op @@ -37,71 +38,72 @@ def visualize(image_original, image_ua): plt.title("DE UniformAugment image") plt.show() - + def test_uniform_augment(plot=False, num_ops=2): """ Test UniformAugment """ logger.info("Test UniformAugment") - + # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms_original = F.ComposeOp([F.Decode(), - F.Resize((224,224)), - F.ToTensor()]) - + F.Resize((224, 224)), + F.ToTensor()]) + ds_original = ds.map(input_columns="image", operations=transforms_original()) - + ds_original = ds_original.batch(512) - - for idx, (image,label) in enumerate(ds_original): + + for idx, (image, label) in enumerate(ds_original): if idx == 0: - images_original = np.transpose(image, (0, 2,3,1)) + images_original = np.transpose(image, (0, 2, 3, 1)) else: images_original = np.append(images_original, - np.transpose(image, (0, 2,3,1)), - axis=0) + np.transpose(image, (0, 2, 3, 1)), + axis=0) + + # UniformAugment Images + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - # UniformAugment Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) - transform_list = [F.RandomRotation(45), F.RandomColor(), F.RandomSharpness(), F.Invert(), F.AutoContrast(), F.Equalize()] - + transforms_ua = F.ComposeOp([F.Decode(), - 
F.Resize((224,224)), + F.Resize((224, 224)), F.UniformAugment(transforms=transform_list, num_ops=num_ops), - F.ToTensor()]) - + F.ToTensor()]) + ds_ua = ds.map(input_columns="image", operations=transforms_ua()) - - ds_ua = ds_ua.batch(512) - - for idx, (image,label) in enumerate(ds_ua): + + ds_ua = ds_ua.batch(512) + + for idx, (image, label) in enumerate(ds_ua): if idx == 0: - images_ua = np.transpose(image, (0, 2,3,1)) + images_ua = np.transpose(image, (0, 2, 3, 1)) else: images_ua = np.append(images_ua, - np.transpose(image, (0, 2,3,1)), + np.transpose(image, (0, 2, 3, 1)), axis=0) - + num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_ua[i]-images_original[i])**2) + mse[i] = np.mean((images_ua[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) - + if plot: visualize(images_original, images_ua) - + + def test_cpp_uniform_augment(plot=False, num_ops=2): """ Test UniformAugment @@ -119,7 +121,7 @@ def test_cpp_uniform_augment(plot=False, num_ops=2): ds_original = ds_original.batch(512) - for idx, (image,label) in enumerate(ds_original): + for idx, (image, label) in enumerate(ds_original): if idx == 0: images_original = np.transpose(image, (0, 2, 3, 1)) else: @@ -127,7 +129,6 @@ def test_cpp_uniform_augment(plot=False, num_ops=2): np.transpose(image, (0, 2, 3, 1)), axis=0) - # UniformAugment Images ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), @@ -147,7 +148,7 @@ def test_cpp_uniform_augment(plot=False, num_ops=2): ds_ua = ds_ua.batch(512) - for idx, (image,label) in enumerate(ds_ua): + for idx, (image, label) in enumerate(ds_ua): if idx == 0: images_ua = np.transpose(image, (0, 2, 3, 1)) else: @@ -163,6 +164,7 @@ def test_cpp_uniform_augment(plot=False, num_ops=2): mse[i] = np.mean((images_ua[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) + def test_cpp_uniform_augment_exception_pyops(num_ops=2): """ Test UniformAugment invalid op in operations @@ -183,6 +185,7 @@ def test_cpp_uniform_augment_exception_pyops(num_ops=2): logger.info("Got an exception in DE: {}".format(str(e))) assert "operations" in str(e) + def test_cpp_uniform_augment_exception_large_numops(num_ops=6): """ Test UniformAugment invalid large number of ops @@ -202,6 +205,7 @@ def test_cpp_uniform_augment_exception_large_numops(num_ops=6): logger.info("Got an exception in DE: {}".format(str(e))) assert "num_ops" in str(e) + def test_cpp_uniform_augment_exception_nonpositive_numops(num_ops=0): """ Test UniformAugment invalid non-positive number of ops @@ -221,10 +225,10 @@ def test_cpp_uniform_augment_exception_nonpositive_numops(num_ops=0): logger.info("Got an exception in DE: {}".format(str(e))) assert "num_ops" in str(e) + if __name__ == "__main__": test_uniform_augment(num_ops=1) test_cpp_uniform_augment(num_ops=1) test_cpp_uniform_augment_exception_pyops(num_ops=1) test_cpp_uniform_augment_exception_large_numops(num_ops=6) test_cpp_uniform_augment_exception_nonpositive_numops(num_ops=0) - diff --git a/tests/ut/python/dataset/test_var_batch_map.py b/tests/ut/python/dataset/test_var_batch_map.py index 24982afc6c..8031446dda 100644 --- a/tests/ut/python/dataset/test_var_batch_map.py +++ b/tests/ut/python/dataset/test_var_batch_map.py @@ -77,7 +77,8 @@ def test_variable_size_batch(): return ([np.copy(arr) for arr in colList],) def test_repeat_batch(gen_num, r, drop, func, res): - data1 = 
ds.GeneratorDataset((lambda: gen(gen_num)), ["num"]).repeat(r).batch(batch_size=func, drop_remainder=drop) + data1 = ds.GeneratorDataset((lambda: gen(gen_num)), ["num"]).repeat(r).batch(batch_size=func, + drop_remainder=drop) for item in data1.create_dict_iterator(): res.append(item["num"]) @@ -91,7 +92,8 @@ def test_variable_size_batch(): return res def test_batch_repeat(gen_num, r, drop, func, res): - data1 = ds.GeneratorDataset((lambda: gen(gen_num)), ["num"]).batch(batch_size=func, drop_remainder=drop).repeat(r) + data1 = ds.GeneratorDataset((lambda: gen(gen_num)), ["num"]).batch(batch_size=func, drop_remainder=drop).repeat( + r) for item in data1.create_dict_iterator(): res.append(item["num"]) diff --git a/tests/ut/python/dataset/test_zip.py b/tests/ut/python/dataset/test_zip.py index b701c92034..557c12f683 100644 --- a/tests/ut/python/dataset/test_zip.py +++ b/tests/ut/python/dataset/test_zip.py @@ -16,8 +16,6 @@ import mindspore.dataset as ds from mindspore import log as logger from util import save_and_check_dict, save_and_check_md5 - - # Dataset in DIR_1 has 5 rows and 5 columns DATA_DIR_1 = ["../data/dataset/testTFBert5Rows1/5TFDatas.data"] SCHEMA_DIR_1 = "../data/dataset/testTFBert5Rows1/datasetSchema.json" diff --git a/tests/ut/python/dataset/util.py b/tests/ut/python/dataset/util.py index 8f971fde1f..66c42ded66 100644 --- a/tests/ut/python/dataset/util.py +++ b/tests/ut/python/dataset/util.py @@ -18,7 +18,7 @@ import os import hashlib import numpy as np import matplotlib.pyplot as plt -#import jsbeautifier +# import jsbeautifier from mindspore import log as logger # These are the column names defined in the testTFTestAllTypes dataset diff --git a/tests/ut/python/hccl_test/manage/api.py b/tests/ut/python/hccl_test/manage/api.py index 8dac167a3f..f6b60b3d2e 100644 --- a/tests/ut/python/hccl_test/manage/api.py +++ b/tests/ut/python/hccl_test/manage/api.py @@ -15,6 +15,7 @@ """api definition""" import threading + class Hccl(): """Hccl definition""" _instance_lock = threading.Lock() @@ -51,6 +52,7 @@ class Hccl(): def rank_size(self, size): self._rank_size = size + # pylint: disable=unused-argument def get_rank_id(group=None): hccl = Hccl() @@ -65,18 +67,22 @@ def get_rank_size(group=None): return int(group.split("-")[0]) raise ValueError + # pylint: disable=unused-argument def get_world_rank_from_group_rank(group, group_rank_id): return group_rank_id + # pylint: disable=unused-argument def get_group_rank_from_world_rank(world_rank_id, group): return world_rank_id + # pylint: disable=unused-argument def create_group(group, rank_size, rank_ids): pass + # pylint: disable=unused-argument def destroy_group(group): pass diff --git a/tests/ut/python/mindrecord/skip_test_issue.py b/tests/ut/python/mindrecord/skip_test_issue.py index 29b952fb45..75a55e543d 100644 --- a/tests/ut/python/mindrecord/skip_test_issue.py +++ b/tests/ut/python/mindrecord/skip_test_issue.py @@ -27,6 +27,7 @@ CV_FILE_NAME = "./imagenet.mindrecord" NLP_FILE_NAME = "./aclImdb.mindrecord" MKV_FILE_NAME = "./vehPer.mindrecord" + def test_cv_file_writer_default_shard_num(): """test cv dataset writer when shard_num is default value.""" writer = FileWriter(CV_FILE_NAME) @@ -45,6 +46,7 @@ def test_cv_file_writer_default_shard_num(): os.remove("{}".format(CV_FILE_NAME)) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_file_writer_shard_num_10(): """test cv dataset writer when shard_num equals 10.""" shard_num = 10 @@ -67,6 +69,7 @@ def test_cv_file_writer_shard_num_10(): os.remove("{}".format(item)) 
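The teardown pattern repeated throughout these mindrecord tests removes two files per shard, because every shard "X" is written together with a companion index database "X.db" (the ShardIndexGenerator output seen elsewhere in this patch). A small helper sketch capturing that convention; the function name is illustrative:

import os

def remove_mindrecord(path):
    # Delete a shard and its companion index database, if present.
    for p in (path, path + ".db"):
        if os.path.exists(p):
            os.remove(p)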
os.remove("{}.db".format(item)) + def test_cv_file_writer_file_name_none(): """test cv dataset writer when file_name is none.""" with pytest.raises(Exception) as e: @@ -75,6 +78,7 @@ def test_cv_file_writer_file_name_none(): "error_msg: Invalid parameter value." \ " File path is not allowed None or empty!" + def test_cv_file_writer_file_name_null(): """test cv dataset writer when file_name is empty string.""" with pytest.raises(Exception) as e: @@ -83,6 +87,7 @@ def test_cv_file_writer_file_name_null(): "error_msg: Invalid parameter value." \ " File path is not allowed None or empty!" + def test_cv_file_writer_shard_number_less_1(): """test cv dataset writer when shard_num is less than 1.""" with pytest.raises(Exception) as e: @@ -91,6 +96,7 @@ def test_cv_file_writer_shard_number_less_1(): "error_msg: Invalid parameter value. " \ "Shard number should " in str(e.value) + def test_cv_file_writer_shard_number_more_1000(): """test cv dataset writer when shard_num is greater than 1000.""" with pytest.raises(Exception) as e: @@ -99,6 +105,7 @@ def test_cv_file_writer_shard_number_more_1000(): "error_msg: Invalid parameter value. " \ "Shard number should " in str(e.value) + def test_add_empty_schema(): """test schema add when schema is empty.""" header = ShardHeader() @@ -111,6 +118,7 @@ def test_add_empty_schema(): assert str(e.value) == "[MRMBuildSchemaError]: error_code: 1347690609, " \ "error_msg: Failed to build schema." + def test_add_schema_without_desc(): """test schema add without desc.""" header = ShardHeader() @@ -121,22 +129,24 @@ def test_add_schema_without_desc(): schema_id = header.add_schema(schema) # add schema assert schema_id == 0 + def test_add_empty_index(): """test index add when index fields is empty string.""" schema_json = {"file_name": {"type": "string"}, "label": {"type": "number"}} header = ShardHeader() - schema = header.build_schema(schema_json, ["data"], "img") # create schema - header.add_schema(schema) # add schema + schema = header.build_schema(schema_json, ["data"], "img") # create schema + header.add_schema(schema) # add schema with pytest.raises(Exception, match="incompatible"): header.add_index_fields("") + def test_file_writer_fail_add_index(): """test file writer, read when failed on adding index.""" data_raw = get_data("../data/mindrecord/testImageNetData/") schema_json = {"file_name": {"type": "string"}, "label": {"type": "number"}} header = ShardHeader() - schema = header.build_schema(schema_json, ["data"], "img") # create schema - schema_id = header.add_schema(schema) # add schema + schema = header.build_schema(schema_json, ["data"], "img") # create schema + schema_id = header.add_schema(schema) # add schema with pytest.raises(TypeError, match="missing 1 "): ret = header.add_index_fields() assert ret == FAILED @@ -146,15 +156,15 @@ def test_file_writer_fail_add_index(): ret = header.add_index_fields(index_fields) assert ret == FAILED - file_name = os.path.join(os.getcwd(), "test_001.mindrecord") # set output filename - writer = ShardWriter() # test_file_writer + file_name = os.path.join(os.getcwd(), "test_001.mindrecord") # set output filename + writer = ShardWriter() # test_file_writer ret = writer.open([file_name]) assert ret == SUCCESS, 'failed on opening files.' - ret = writer.set_shard_header(header) # write header + ret = writer.set_shard_header(header) # write header assert ret == SUCCESS, 'failed on setting header.' ret = writer.write_raw_cv_data({schema_id: data_raw}) assert ret == SUCCESS, 'failed on writing raw data.' 
- ret = writer.commit() # commit data + ret = writer.commit() # commit data assert ret == SUCCESS, "commit failed" # ShardIndexGenerator generator = ShardIndexGenerator(os.path.realpath(file_name)) @@ -179,6 +189,7 @@ def test_file_writer_fail_add_index(): os.remove("{}".format(file_name)) os.remove("{}.db".format(file_name)) + def test_add_index_with_incorrect_field(): """test index add with incorrect field(64).""" header = ShardHeader() @@ -189,6 +200,7 @@ def test_add_index_with_incorrect_field(): with pytest.raises(Exception, match="incompatible function arguments"): header.add_index_fields([(-1, "id")]) + def test_add_index_with_string_list(): """test index add with list of string(64).""" header = ShardHeader() @@ -199,6 +211,7 @@ def test_add_index_with_string_list(): ret = header.add_index_fields(["id", "label"]) assert ret == SUCCESS + def test_add_index_with_dict(): """test index add when index fields' datatype is dict(64).""" writer = FileWriter(MKV_FILE_NAME, FILES_NUM) @@ -213,6 +226,7 @@ def test_add_index_with_dict(): "error_msg: Invalid parameter type. " \ "'index_fields' expect list type." + def test_mkv_file_reader_with_negative_num_consumer(): """test mkv file reader when the number of consumer is negative.""" writer = FileWriter(MKV_FILE_NAME, FILES_NUM) @@ -236,6 +250,7 @@ def test_mkv_file_reader_with_negative_num_consumer(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_write_raw_data_with_empty_list(): """test write raw data with empty list.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) @@ -258,6 +273,7 @@ def test_write_raw_data_with_empty_list(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_issue_38(): """test cv dataset writer when schema does not match raw data.""" writer = FileWriter(CV_FILE_NAME, 1) @@ -274,6 +290,7 @@ def test_issue_38(): "Detail: Could not set blob field " \ "'file_name' as index field." 
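By contrast, the high-level FileWriter/FileReader pair used by the surrounding issue tests hides the header plumbing entirely. A minimal round trip assembled from calls that appear verbatim in this file; the file name and record are illustrative:

import os
from mindspore.mindrecord import FileWriter, FileReader

mindrecord_file = "sketch.mindrecord"  # illustrative
writer = FileWriter(mindrecord_file, 1)  # a single shard
writer.add_schema({"file_name": {"type": "string"}, "label": {"type": "number"}}, "img_schema")
writer.add_index(["file_name", "label"])  # both fields are scalars, so both may be indexed
writer.write_raw_data([{"file_name": "001.jpg", "label": 43}])
writer.commit()

reader = FileReader(mindrecord_file)
for index, item in enumerate(reader.get_next()):
    print(index, item)
reader.close()
os.remove(mindrecord_file)
os.remove(mindrecord_file + ".db")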
+ def test_issue_39(): """test cv dataset writer when schema fields' datatype does not match raw data.""" writer = FileWriter(CV_FILE_NAME, 1) @@ -294,6 +311,7 @@ def test_issue_39(): os.remove("{}".format(CV_FILE_NAME)) os.remove("{}.db".format(CV_FILE_NAME)) + def test_issue_40(): """test cv dataset when write raw data twice.""" writer = FileWriter(CV_FILE_NAME, 1) @@ -310,6 +328,7 @@ def test_issue_40(): os.remove("{}".format(CV_FILE_NAME)) os.remove("{}.db".format(CV_FILE_NAME)) + def test_issue_73(): """test file reader by column name.""" writer = FileWriter(MKV_FILE_NAME, FILES_NUM) @@ -334,6 +353,7 @@ def test_issue_73(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_issue_117(): """test add schema when field type is incorrect.""" writer = FileWriter(__file__, FILES_NUM) @@ -349,6 +369,7 @@ def test_issue_117(): "contains illegal attributes"): writer.add_schema(schema, "img_schema") + def test_issue_95(): """test file reader when failed on file write.""" writer = FileWriter(__file__, FILES_NUM) @@ -373,6 +394,7 @@ def test_issue_95(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_mindrecord_add_index_016(): """test index add when index fields are incorrect.""" schema_json = {"id": {"type": "number"}, "name": {"type": "string"}, @@ -384,6 +406,7 @@ def test_mindrecord_add_index_016(): with pytest.raises(Exception): header.add_index_fields(index_fields_list) + def test_mindrecord_add_index_011(): """test index add""" schema_json = {"id": {"type": "number"}, "name": {"type": "string"}, @@ -395,6 +418,7 @@ def test_mindrecord_add_index_011(): ret = header.add_index_fields(index_fields_list) assert ret == 0, 'failed on adding index fields.' + def test_issue_118(): """test file writer when raw data do not match schema.""" shard_num = 4 @@ -420,6 +444,7 @@ def test_issue_118(): os.remove("{}".format(item)) os.remove("{}.db".format(item)) + def test_issue_87(): """test file writer when data(bytes) do not match field type(string).""" shard_num = 4 @@ -438,6 +463,7 @@ def test_issue_87(): for item in paths: os.remove("{}".format(item)) + def test_issue_84(): """test file reader when db does not match.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) @@ -462,8 +488,8 @@ def test_issue_84(): "segment_ids": {"type": "array", "items": {"type": "number"}} } - writer.set_header_size(1<<14) - writer.set_page_size(1<<15) + writer.set_header_size(1 << 14) + writer.set_page_size(1 << 15) writer.add_schema(nlp_schema_json, "nlp_schema") writer.add_index(["id", "rating"]) writer.write_raw_data(data) @@ -493,6 +519,7 @@ def test_issue_84(): os.remove("{}".format(item)) os.remove("{}.db".format(item)) + def test_issue_65(): """test file reader when file name is illegal.""" reader = ShardReader() @@ -503,6 +530,7 @@ def test_issue_65(): "error_msg: " \ "MindRecord File could not open successfully." + def skip_test_issue_155(): """test file writer loop.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) @@ -520,6 +548,7 @@ def skip_test_issue_155(): count += 1 assert count == 10000, "Failed to read mutiple writed data." 
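For reference, the shifted constants that test_issue_84 above passes to set_header_size and set_page_size are plain byte counts: 1 << 14 is 16384 (16 KB) for the header and 1 << 15 is 32768 (32 KB) per page.

for shift in (14, 15):
    print("1 <<", shift, "=", 1 << shift)  # 16384, then 32768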
+ def test_issue_124(): """test file writer when data(string) do not match field type(bytes).""" shard_num = 4 @@ -544,6 +573,7 @@ def test_issue_124(): os.remove("{}".format(item)) os.remove("{}.db".format(item)) + def test_issue_36(): """test file writer when shard num is illegal.""" with pytest.raises(ParamValueError, match="Shard number should between "): @@ -559,6 +589,7 @@ def test_issue_36(): writer.write_raw_data(data) writer.commit() + def test_issue_34(): """test file writer""" writer = FileWriter(CV_FILE_NAME) @@ -574,12 +605,13 @@ def test_issue_34(): i = 0 for index, x in enumerate(reader.get_next()): logger.info("#item{}: {}".format(index, x)) - i = i+1 + i = i + 1 logger.info("count: {}".format(i)) reader.close() os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_file_writer_raw_data_038(): """test write raw data without verify.""" shard_num = 11 @@ -615,6 +647,7 @@ def test_file_writer_raw_data_038(): if os.path.exists("test_file_writer_raw_data_{}.db".format(n)): os.remove("test_file_writer_raw_data_{}.db".format(n)) + def test_more_than_1_bytes_in_schema(): """test file writer when schema contains multiple 'bytes' fields.""" schema_json = {"id": {"type": "string"}, "label": {"type": "number"}, @@ -627,6 +660,7 @@ def test_more_than_1_bytes_in_schema(): writer = FileWriter(CV_FILE_NAME, FILES_NUM) writer.add_schema(schema_json, "img_schema") + def test_shard_4_raw_data_1(): """test file writer when shard_num equals 4 and number of sample equals 1.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) diff --git a/tests/ut/python/mindrecord/skip_test_mindrecord_internal.py b/tests/ut/python/mindrecord/skip_test_mindrecord_internal.py index 48c0cbc0a0..6a7c2af434 100644 --- a/tests/ut/python/mindrecord/skip_test_mindrecord_internal.py +++ b/tests/ut/python/mindrecord/skip_test_mindrecord_internal.py @@ -19,8 +19,10 @@ from multiprocessing import cpu_count from mindspore.mindrecord import MAX_CONSUMER_COUNT + def test_c_layer_thread_num_with_python_layer(): assert cpu_count() == MAX_CONSUMER_COUNT() + if __name__ == "__main__": - test_c_layer_thread_num_with_python_layer() \ No newline at end of file + test_c_layer_thread_num_with_python_layer() diff --git a/tests/ut/python/mindrecord/skip_test_mindrecord_shard.py b/tests/ut/python/mindrecord/skip_test_mindrecord_shard.py index b673e90635..0e3711e71c 100644 --- a/tests/ut/python/mindrecord/skip_test_mindrecord_shard.py +++ b/tests/ut/python/mindrecord/skip_test_mindrecord_shard.py @@ -25,6 +25,7 @@ CV_FILE_NAME = "./imagenet.mindrecord" NLP_FILE_NAME = "./aclImdb.mindrecord" MKV_FILE_NAME = "./vehPer.mindrecord" + def test_nlp_file_writer(): """test nlp file writer using shard api""" schema_json = {"id": {"type": "string"}, "label": {"type": "number"}, @@ -62,6 +63,7 @@ def test_nlp_file_writer(): generator.build() generator.write_to_db() + def test_nlp_file_reader(): """test nlp file reader using shard api""" dataset = ShardReader() @@ -77,6 +79,7 @@ def test_nlp_file_reader(): dataset.finish() dataset.close() + def test_nlp_page_reader(): """test nlp page reader using shard api""" reader = ShardSegment() @@ -103,6 +106,7 @@ def test_nlp_page_reader(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_writer(): """test cv file reader using shard api""" img_schema_json = {"file_name": {"type": "string"}, @@ -134,6 +138,7 @@ def test_cv_file_writer(): generator.build() generator.write_to_db() + def test_cv_file_reader(): """test cv file reader using shard api""" dataset = ShardReader() 
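skip_test_mindrecord_internal above ties the reader's consumer cap to the host CPU count. The same invariant, runnable standalone with the two imports that test already uses:

from multiprocessing import cpu_count
from mindspore.mindrecord import MAX_CONSUMER_COUNT

# The Python-visible consumer cap mirrors the number of host CPUs.
assert cpu_count() == MAX_CONSUMER_COUNT()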
@@ -151,6 +156,7 @@ def test_cv_file_reader(): dataset.finish() dataset.close() + def test_cv_page_reader(): """test cv page reader using shard api""" reader = ShardSegment() @@ -176,6 +182,7 @@ def test_cv_page_reader(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_mkv_file_writer(): """test mkv file writer using shard api""" data = get_mkv_data("../data/mindrecord/testVehPerData/") @@ -206,6 +213,7 @@ def test_mkv_file_writer(): generator.build() generator.write_to_db() + def test_mkv_page_reader(): """test mkv page reader using shard api""" reader = ShardSegment() @@ -226,6 +234,7 @@ def test_mkv_page_reader(): img2 = reader.read_at_page_by_name("2", 0, 1) logger.info("img2 len: {}, img2[0] len: {}, img2[0]: {}".format(len(img2), len(img2[0]), img2[0])) + def test_mkv_page_reader_random(): """test mkv page random reader using shard api""" reader = ShardSegment() @@ -248,6 +257,7 @@ def test_mkv_page_reader_random(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_mkv_file_writer_with_exactly_schema(): """test mkv file writer using shard api""" header = ShardHeader() @@ -314,6 +324,7 @@ def test_mkv_file_writer_with_exactly_schema(): generator.build() generator.write_to_db() + def test_mkv_file_reader_with_exactly_schema(): """test mkv file reader using shard api""" dataset = ShardReader() diff --git a/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py b/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py index e95f25aae4..6b1fcfdc25 100644 --- a/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py +++ b/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py @@ -24,6 +24,7 @@ from mindspore import log as logger CIFAR100_DIR = "../data/mindrecord/testCifar100Data" MINDRECORD_FILE = "./cifar100.mindrecord" + def test_cifar100_to_mindrecord_without_index_fields(): """test transform cifar100 dataset to mindrecord without index fields.""" cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, MINDRECORD_FILE) @@ -38,6 +39,7 @@ def test_cifar100_to_mindrecord_without_index_fields(): os.remove("{}".format(MINDRECORD_FILE + "_test")) os.remove("{}.db".format(MINDRECORD_FILE + "_test")) + def test_cifar100_to_mindrecord(): """test transform cifar100 dataset to mindrecord.""" cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, MINDRECORD_FILE) @@ -51,6 +53,7 @@ def test_cifar100_to_mindrecord(): os.remove("{}".format(MINDRECORD_FILE + "_test")) os.remove("{}.db".format(MINDRECORD_FILE + "_test")) + def read(): """test file reader""" count = 0 @@ -73,6 +76,7 @@ def read(): assert count == 4 reader.close() + def test_cifar100_to_mindrecord_illegal_file_name(): """ test transform cifar100 dataset to mindrecord @@ -83,6 +87,7 @@ def test_cifar100_to_mindrecord_illegal_file_name(): cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, filename) cifar100_transformer.transform() + def test_cifar100_to_mindrecord_filename_start_with_space(): """ test transform cifar10 dataset to mindrecord @@ -94,6 +99,7 @@ def test_cifar100_to_mindrecord_filename_start_with_space(): cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, filename) cifar100_transformer.transform() + def test_cifar100_to_mindrecord_filename_contain_space(): """ test transform cifar10 dataset to mindrecord @@ -110,6 +116,7 @@ def test_cifar100_to_mindrecord_filename_contain_space(): os.remove("{}".format(filename + "_test")) os.remove("{}.db".format(filename + "_test")) + def test_cifar100_to_mindrecord_directory(): """ test transform cifar10 dataset to mindrecord @@ -121,6 +128,7 @@ def 
test_cifar100_to_mindrecord_directory(): CIFAR100_DIR) cifar100_transformer.transform() + def test_cifar100_to_mindrecord_filename_equals_cifar100(): """ test transform cifar10 dataset to mindrecord diff --git a/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py b/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py index bff06e4e72..a86b716cd0 100644 --- a/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py +++ b/tests/ut/python/mindrecord/test_cifar10_to_mindrecord.py @@ -23,6 +23,7 @@ from mindspore import log as logger CIFAR10_DIR = "../data/mindrecord/testCifar10Data" MINDRECORD_FILE = "./cifar10.mindrecord" + def test_cifar10_to_mindrecord_without_index_fields(): """test transform cifar10 dataset to mindrecord without index fields.""" cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, MINDRECORD_FILE) @@ -36,6 +37,7 @@ def test_cifar10_to_mindrecord_without_index_fields(): os.remove("{}".format(MINDRECORD_FILE + "_test")) os.remove("{}.db".format(MINDRECORD_FILE + "_test")) + def test_cifar10_to_mindrecord(): """test transform cifar10 dataset to mindrecord.""" cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, MINDRECORD_FILE) @@ -49,6 +51,7 @@ def test_cifar10_to_mindrecord(): os.remove("{}".format(MINDRECORD_FILE + "_test")) os.remove("{}.db".format(MINDRECORD_FILE + "_test")) + def test_cifar10_to_mindrecord_with_return(): """test transform cifar10 dataset to mindrecord.""" cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, MINDRECORD_FILE) @@ -63,6 +66,7 @@ def test_cifar10_to_mindrecord_with_return(): os.remove("{}".format(MINDRECORD_FILE + "_test")) os.remove("{}.db".format(MINDRECORD_FILE + "_test")) + def read(): """test file reader""" count = 0 @@ -85,6 +89,7 @@ def read(): assert count == 4 reader.close() + def test_cifar10_to_mindrecord_illegal_file_name(): """ test transform cifar10 dataset to mindrecord @@ -95,6 +100,7 @@ def test_cifar10_to_mindrecord_illegal_file_name(): cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, filename) cifar10_transformer.transform() + def test_cifar10_to_mindrecord_filename_start_with_space(): """ test transform cifar10 dataset to mindrecord @@ -106,6 +112,7 @@ def test_cifar10_to_mindrecord_filename_start_with_space(): cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, filename) cifar10_transformer.transform() + def test_cifar10_to_mindrecord_filename_contain_space(): """ test transform cifar10 dataset to mindrecord @@ -122,6 +129,7 @@ def test_cifar10_to_mindrecord_filename_contain_space(): os.remove("{}".format(filename + "_test")) os.remove("{}.db".format(filename + "_test")) + def test_cifar10_to_mindrecord_directory(): """ test transform cifar10 dataset to mindrecord @@ -132,6 +140,7 @@ def test_cifar10_to_mindrecord_directory(): cifar10_transformer = Cifar10ToMR(CIFAR10_DIR, CIFAR10_DIR) cifar10_transformer.transform() + def test_cifar10_to_mindrecord_filename_equals_cifar10(): """ test transform cifar10 dataset to mindrecord diff --git a/tests/ut/python/mindrecord/test_imagenet_to_mindrecord.py b/tests/ut/python/mindrecord/test_imagenet_to_mindrecord.py index 5634f3a0a7..6e01b904b6 100644 --- a/tests/ut/python/mindrecord/test_imagenet_to_mindrecord.py +++ b/tests/ut/python/mindrecord/test_imagenet_to_mindrecord.py @@ -24,6 +24,7 @@ IMAGENET_IMAGE_DIR = "../data/mindrecord/testImageNetDataWhole/images" MINDRECORD_FILE = "../data/mindrecord/testImageNetDataWhole/imagenet.mindrecord" PARTITION_NUMBER = 4 + def read(filename): """test file reade""" count = 0 @@ -36,6 +37,7 @@ def read(filename): assert count == 20 reader.close() + def 
test_imagenet_to_mindrecord(): """test transform imagenet dataset to mindrecord.""" imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR, @@ -49,6 +51,7 @@ def test_imagenet_to_mindrecord(): os.remove(MINDRECORD_FILE + str(i)) os.remove(MINDRECORD_FILE + str(i) + ".db") + def test_imagenet_to_mindrecord_default_partition_number(): """ test transform imagenet dataset to mindrecord @@ -63,6 +66,7 @@ def test_imagenet_to_mindrecord_default_partition_number(): os.remove("{}".format(MINDRECORD_FILE)) os.remove("{}.db".format(MINDRECORD_FILE)) + def test_imagenet_to_mindrecord_partition_number_0(): """ test transform imagenet dataset to mindrecord @@ -74,6 +78,7 @@ def test_imagenet_to_mindrecord_partition_number_0(): MINDRECORD_FILE, 0) imagenet_transformer.transform() + def test_imagenet_to_mindrecord_partition_number_none(): """ test transform imagenet dataset to mindrecord @@ -86,6 +91,7 @@ def test_imagenet_to_mindrecord_partition_number_none(): MINDRECORD_FILE, None) imagenet_transformer.transform() + def test_imagenet_to_mindrecord_illegal_filename(): """ test transform imagenet dataset to mindrecord diff --git a/tests/ut/python/mindrecord/test_mindrecord_base.py b/tests/ut/python/mindrecord/test_mindrecord_base.py index da424122b0..75762cefbc 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_base.py +++ b/tests/ut/python/mindrecord/test_mindrecord_base.py @@ -26,6 +26,7 @@ CV2_FILE_NAME = "./imagenet_loop.mindrecord" CV3_FILE_NAME = "./imagenet_append.mindrecord" NLP_FILE_NAME = "./aclImdb.mindrecord" + def test_write_read_process(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 43, "score": 0.8, "mask": np.array([3, 6, 9], dtype=np.int64), @@ -75,6 +76,7 @@ def test_write_read_process(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_write_read_process_with_define_index_field(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 43, "score": 0.8, "mask": np.array([3, 6, 9], dtype=np.int64), @@ -125,6 +127,7 @@ def test_write_read_process_with_define_index_field(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_cv_file_writer_tutorial(): """tutorial for cv dataset writer.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) @@ -136,6 +139,7 @@ def test_cv_file_writer_tutorial(): writer.write_raw_data(data) writer.commit() + def test_cv_file_append_writer(): """tutorial for cv dataset append writer.""" writer = FileWriter(CV3_FILE_NAME, 4) @@ -164,6 +168,7 @@ def test_cv_file_append_writer(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_writer_loop_and_read(): """tutorial for cv dataset loop writer.""" writer = FileWriter(CV2_FILE_NAME, FILES_NUM) @@ -191,6 +196,7 @@ def test_cv_file_writer_loop_and_read(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_reader_tutorial(): """tutorial for cv file reader.""" reader = FileReader(CV_FILE_NAME + "0") @@ -202,6 +208,7 @@ def test_cv_file_reader_tutorial(): assert count == 10 reader.close() + def test_cv_file_reader_file_list(): """tutorial for cv file partial reader.""" reader = FileReader([CV_FILE_NAME + str(x) for x in range(FILES_NUM)]) @@ -212,6 +219,7 @@ def test_cv_file_reader_file_list(): logger.info("#item{}: {}".format(index, x)) assert count == 10 + def test_cv_file_reader_partial_tutorial(): """tutorial for cv file partial reader.""" reader = FileReader(CV_FILE_NAME + "0") @@ 
-224,11 +232,12 @@ def test_cv_file_reader_partial_tutorial(): reader.finish() assert count == 5 + def test_cv_page_reader_tutorial(): """tutorial for cv page reader.""" reader = MindPage(CV_FILE_NAME + "0") fields = reader.get_category_fields() - assert fields == ['file_name', 'label'],\ + assert fields == ['file_name', 'label'], \ 'failed on getting candidate category fields.' ret = reader.set_category_field("label") @@ -247,11 +256,12 @@ def test_cv_page_reader_tutorial(): assert len(row1[0]) == 3 assert row1[0]['label'] == 822 + def test_cv_page_reader_tutorial_by_file_name(): """tutorial for cv page reader.""" reader = MindPage(CV_FILE_NAME + "0") fields = reader.get_category_fields() - assert fields == ['file_name', 'label'],\ + assert fields == ['file_name', 'label'], \ 'failed on getting candidate category fields.' ret = reader.set_category_field("file_name") @@ -270,11 +280,12 @@ def test_cv_page_reader_tutorial_by_file_name(): assert len(row1[0]) == 3 assert row1[0]['label'] == 13 + def test_cv_page_reader_tutorial_new_api(): """tutorial for cv page reader.""" reader = MindPage(CV_FILE_NAME + "0") fields = reader.candidate_fields - assert fields == ['file_name', 'label'],\ + assert fields == ['file_name', 'label'], \ 'failed on getting candidate category fields.' reader.category_field = "file_name" @@ -298,6 +309,7 @@ def test_cv_page_reader_tutorial_new_api(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_nlp_file_writer_tutorial(): """tutorial for nlp file writer.""" writer = FileWriter(NLP_FILE_NAME, FILES_NUM) @@ -318,6 +330,7 @@ def test_nlp_file_writer_tutorial(): writer.write_raw_data(data) writer.commit() + def test_nlp_file_reader_tutorial(): """tutorial for nlp file reader.""" reader = FileReader(NLP_FILE_NAME + "0") @@ -329,11 +342,12 @@ def test_nlp_file_reader_tutorial(): assert count == 10 reader.close() + def test_nlp_page_reader_tutorial(): """tutorial for nlp page reader.""" reader = MindPage(NLP_FILE_NAME + "0") fields = reader.get_category_fields() - assert fields == ['id', 'rating'],\ + assert fields == ['id', 'rating'], \ 'failed on getting candidate category fields.' 
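The page-reader tutorials in this file all follow one flow: open a MindPage, pick a category field, then fetch rows page by page. A condensed sketch built from calls visible in these tests; the shard name is illustrative, and the argument meaning of read_at_page_by_name is assumed to be (category value, page number, rows per page), following the mkv page-reader test:

from mindspore.mindrecord import MindPage

reader = MindPage("imagenet.mindrecord0")        # illustrative shard name
print(reader.get_category_fields())              # e.g. ['file_name', 'label']
reader.set_category_field("label")
rows = reader.read_at_page_by_name("822", 0, 1)  # assumed: (category value, page, rows per page)
print(len(rows), list(rows[0].keys()))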
ret = reader.set_category_field("rating") @@ -358,6 +372,7 @@ def test_nlp_page_reader_tutorial(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_writer_shard_num_10(): """test file writer when shard num equals 10.""" writer = FileWriter(CV_FILE_NAME, 10) @@ -370,11 +385,12 @@ def test_cv_file_writer_shard_num_10(): writer.commit() paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) - for x in range(10)] + for x in range(10)] for x in paths: os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_writer_absolute_path(): """test cv file writer when file name is absolute path.""" file_name = "/tmp/" + str(uuid.uuid4()) @@ -393,6 +409,7 @@ def test_cv_file_writer_absolute_path(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_cv_file_writer_without_data(): """test cv file writer without data.""" writer = FileWriter(CV_FILE_NAME, 1) @@ -411,6 +428,7 @@ def test_cv_file_writer_without_data(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_file_writer_no_blob(): """test cv file writer without blob data.""" writer = FileWriter(CV_FILE_NAME, 1) @@ -432,18 +450,19 @@ def test_cv_file_writer_no_blob(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_file_writer_no_raw(): """test cv file writer without raw data.""" writer = FileWriter(NLP_FILE_NAME) data = list(get_nlp_data("../data/mindrecord/testAclImdbData/pos", - "../data/mindrecord/testAclImdbData/vocab.txt", - 10)) + "../data/mindrecord/testAclImdbData/vocab.txt", + 10)) nlp_schema_json = {"input_ids": {"type": "int64", - "shape": [1, -1]}, - "input_mask": {"type": "int64", - "shape": [1, -1]}, - "segment_ids": {"type": "int64", - "shape": [1, -1]} + "shape": [1, -1]}, + "input_mask": {"type": "int64", + "shape": [1, -1]}, + "segment_ids": {"type": "int64", + "shape": [1, -1]} } writer.add_schema(nlp_schema_json, "no_raw_schema") writer.write_raw_data(data) @@ -459,6 +478,7 @@ def test_cv_file_writer_no_raw(): os.remove(NLP_FILE_NAME) os.remove("{}.db".format(NLP_FILE_NAME)) + def test_write_read_process_with_multi_bytes(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 43, @@ -583,6 +603,7 @@ def test_write_read_process_with_multi_bytes(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_write_read_process_with_multi_array(): mindrecord_file_name = "test.mindrecord" data = [{"source_sos_ids": np.array([1, 2, 3, 4, 5], dtype=np.int64), @@ -726,6 +747,7 @@ def test_write_read_process_with_multi_array(): os.remove("{}".format(mindrecord_file_name)) os.remove("{}.db".format(mindrecord_file_name)) + def test_write_read_process_with_multi_bytes_and_array(): mindrecord_file_name = "test.mindrecord" data = [{"file_name": "001.jpg", "label": 4, diff --git a/tests/ut/python/mindrecord/test_mindrecord_exception.py b/tests/ut/python/mindrecord/test_mindrecord_exception.py index 75a32eb347..7388409ba9 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_exception.py +++ b/tests/ut/python/mindrecord/test_mindrecord_exception.py @@ -17,7 +17,7 @@ import os import pytest from mindspore.mindrecord import FileWriter, FileReader, MindPage, SUCCESS from mindspore.mindrecord import MRMOpenError, MRMGenerateIndexError, ParamValueError, MRMGetMetaError, \ - MRMFetchDataError + MRMFetchDataError from mindspore import log as logger from utils import get_data @@ -25,36 +25,43 @@ CV_FILE_NAME = "./imagenet.mindrecord" NLP_FILE_NAME = 
"./aclImdb.mindrecord" FILES_NUM = 4 + def test_cv_file_writer_shard_num_none(): """test cv file writer when shard num is None.""" with pytest.raises(Exception, match="Shard num is illegal."): FileWriter("/tmp/123454321", None) + def test_cv_file_writer_shard_num_str(): """test cv file writer when shard num is string.""" with pytest.raises(Exception, match="Shard num is illegal."): FileWriter("/tmp/123454321", "20") + def test_cv_page_reader_consumer_num_none(): """test cv page reader when consumer number is None.""" with pytest.raises(Exception, match="Consumer number is illegal."): MindPage(CV_FILE_NAME + "0", None) + def test_cv_page_reader_consumer_num_str(): """test cv page reader when consumer number is string.""" with pytest.raises(Exception, match="Consumer number is illegal."): MindPage(CV_FILE_NAME + "0", "2") + def test_nlp_file_reader_consumer_num_none(): """test nlp file reader when consumer number is None.""" with pytest.raises(Exception, match="Consumer number is illegal."): FileReader(NLP_FILE_NAME + "0", None) + def test_nlp_file_reader_consumer_num_str(): """test nlp file reader when consumer number is string.""" with pytest.raises(Exception, match="Consumer number is illegal."): FileReader(NLP_FILE_NAME + "0", "4") + def create_cv_mindrecord(files_num): writer = FileWriter(CV_FILE_NAME, files_num) data = get_data("../data/mindrecord/testImageNetData/") @@ -65,15 +72,17 @@ def create_cv_mindrecord(files_num): writer.write_raw_data(data) writer.commit() + def test_lack_partition_and_db(): """test file reader when mindrecord file does not exist.""" with pytest.raises(MRMOpenError) as err: reader = FileReader('dummy.mindrecord') reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) + def test_lack_db(): """test file reader when db file does not exist.""" create_cv_mindrecord(1) @@ -82,10 +91,11 @@ def test_lack_db(): reader = FileReader(CV_FILE_NAME) reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) os.remove(CV_FILE_NAME) + def test_lack_some_partition_and_db(): """test file reader when some partition and db do not exist.""" create_cv_mindrecord(4) @@ -97,7 +107,7 @@ def test_lack_some_partition_and_db(): reader = FileReader(CV_FILE_NAME + "0") reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) for x in range(FILES_NUM)] @@ -107,6 +117,7 @@ def test_lack_some_partition_and_db(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_lack_some_partition_first(): """test file reader when first partition does not exist.""" create_cv_mindrecord(4) @@ -117,7 +128,7 @@ def test_lack_some_partition_first(): reader = FileReader(CV_FILE_NAME + "0") reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' 
\ in str(err.value) for x in paths: if os.path.exists("{}".format(x)): @@ -125,6 +136,7 @@ def test_lack_some_partition_first(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_lack_some_partition_middle(): """test file reader when some partition does not exist.""" create_cv_mindrecord(4) @@ -135,7 +147,7 @@ def test_lack_some_partition_middle(): reader = FileReader(CV_FILE_NAME + "0") reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) for x in paths: if os.path.exists("{}".format(x)): @@ -143,6 +155,7 @@ def test_lack_some_partition_middle(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_lack_some_partition_last(): """test file reader when last partition does not exist.""" create_cv_mindrecord(4) @@ -153,7 +166,7 @@ def test_lack_some_partition_last(): reader = FileReader(CV_FILE_NAME + "0") reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) for x in paths: if os.path.exists("{}".format(x)): @@ -161,6 +174,7 @@ def test_lack_some_partition_last(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_mindpage_lack_some_partition(): """test page reader when some partition does not exist.""" create_cv_mindrecord(4) @@ -170,7 +184,7 @@ def test_mindpage_lack_some_partition(): with pytest.raises(MRMOpenError) as err: MindPage(CV_FILE_NAME + "0") assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) for x in paths: if os.path.exists("{}".format(x)): @@ -178,6 +192,7 @@ def test_mindpage_lack_some_partition(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_lack_some_db(): """test file reader when some db does not exist.""" create_cv_mindrecord(4) @@ -188,7 +203,7 @@ def test_lack_some_db(): reader = FileReader(CV_FILE_NAME + "0") reader.close() assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) for x in paths: if os.path.exists("{}".format(x)): @@ -196,6 +211,7 @@ def test_lack_some_db(): if os.path.exists("{}.db".format(x)): os.remove("{}.db".format(x)) + def test_invalid_mindrecord(): """test file reader when the content of mindrecord is illegal.""" with open(CV_FILE_NAME, 'w') as f: @@ -204,10 +220,11 @@ def test_invalid_mindrecord(): with pytest.raises(MRMOpenError) as err: FileReader(CV_FILE_NAME) assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) os.remove(CV_FILE_NAME) + def test_invalid_db(): """test file reader when the content of db is illegal.""" create_cv_mindrecord(1) @@ -217,11 +234,12 @@ def test_invalid_db(): with pytest.raises(MRMOpenError) as err: FileReader('imagenet.mindrecord') assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' 
\ in str(err.value) os.remove("imagenet.mindrecord") os.remove("imagenet.mindrecord.db") + def test_overwrite_invalid_mindrecord(): """test file writer when overwriting an invalid mindrecord file.""" with open(CV_FILE_NAME, 'w') as f: @@ -229,10 +247,11 @@ def test_overwrite_invalid_mindrecord(): with pytest.raises(MRMOpenError) as err: create_cv_mindrecord(1) assert '[MRMOpenError]: error_code: 1347690596, ' \ - 'error_msg: MindRecord File could not open successfully.'\ + 'error_msg: MindRecord File could not open successfully.' \ in str(err.value) os.remove(CV_FILE_NAME) + def test_overwrite_invalid_db(): """test file writer when overwriting an invalid db file.""" with open('imagenet.mindrecord.db', 'w') as f: @@ -244,6 +263,7 @@ def test_overwrite_invalid_db(): os.remove("imagenet.mindrecord") os.remove("imagenet.mindrecord.db") + def test_read_after_close(): """test file reader when reading after close.""" create_cv_mindrecord(1) @@ -257,6 +277,7 @@ def test_read_after_close(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_file_read_after_read(): """test file reader when reading again after a full read.""" create_cv_mindrecord(1) @@ -276,24 +297,27 @@ def test_file_read_after_read(): os.remove(CV_FILE_NAME) os.remove("{}.db".format(CV_FILE_NAME)) + def test_cv_file_writer_shard_num_greater_than_1000(): """test cv file writer shard number greater than 1000.""" with pytest.raises(ParamValueError) as err: FileWriter(CV_FILE_NAME, 1001) assert 'Shard number should between' in str(err.value) + def test_add_index_without_add_schema(): with pytest.raises(MRMGetMetaError) as err: fw = FileWriter(CV_FILE_NAME) fw.add_index(["label"]) assert 'Failed to get meta info' in str(err.value) + def test_mindpage_pageno_pagesize_not_int(): """test page reader when page number or page size is not int.""" create_cv_mindrecord(4) reader = MindPage(CV_FILE_NAME + "0") fields = reader.get_category_fields() - assert fields == ['file_name', 'label'],\ + assert fields == ['file_name', 'label'], \ 'failed on getting candidate category fields.' ret = reader.set_category_field("label") @@ -323,12 +347,13 @@ def test_mindpage_pageno_pagesize_not_int(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) + def test_mindpage_filename_not_exist(): """test page reader when the queried file name does not exist.""" create_cv_mindrecord(4) reader = MindPage(CV_FILE_NAME + "0") fields = reader.get_category_fields() - assert fields == ['file_name', 'label'],\ + assert fields == ['file_name', 'label'], \ 'failed on getting candidate category fields.'
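# A minimal sketch of the lookup this test drives after the assert above
# (the exception type is an assumption; MRMFetchDataError is already
# imported at the top of this file):
#     reader.set_category_field("file_name")
#     with pytest.raises(MRMFetchDataError):
#         reader.read_at_page_by_name("not_exist.jpg", 0, 1)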
ret = reader.set_category_field("file_name") diff --git a/tests/ut/python/mindrecord/test_mindrecord_multi_images.py b/tests/ut/python/mindrecord/test_mindrecord_multi_images.py index b26e44c5db..dd464c5fb4 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_multi_images.py +++ b/tests/ut/python/mindrecord/test_mindrecord_multi_images.py @@ -24,6 +24,7 @@ DIFF_SHAPE_FILE_NAME = "../data/mindrecord/testImageNetData/cityscapes_train_19_ CV_FILE_NAME = "../data/mindrecord/testTwoImageData/two_bytes.mindrecord" FILES_NUM = 1 + def read(filename, fields_num=5): count = 0 reader = FileReader(filename) @@ -34,6 +35,7 @@ def read(filename, fields_num=5): assert count == 5 reader.close() + def test_write_two_images_mindrecord(): """test two images to mindrecord""" if os.path.exists("{}".format(CV_FILE_NAME + ".db")): @@ -55,6 +57,7 @@ def test_write_two_images_mindrecord(): if os.path.exists("{}".format(CV_FILE_NAME)): os.remove(CV_FILE_NAME) + def test_write_two_images_mindrecord_whole_field(): """test two images to mindrecord""" if os.path.exists("{}".format(CV_FILE_NAME + ".db")): @@ -63,9 +66,9 @@ def test_write_two_images_mindrecord_whole_field(): os.remove(CV_FILE_NAME) writer = FileWriter(CV_FILE_NAME, FILES_NUM) data = get_two_bytes_data(MAP_FILE_NAME) - cv_schema_json={"id": {"type": "int32"}, "file_name": {"type": "string"}, - "label_name": {"type": "string"}, "img_data": {"type": "bytes"}, - "label_data": {"type": "bytes"}} + cv_schema_json = {"id": {"type": "int32"}, "file_name": {"type": "string"}, + "label_name": {"type": "string"}, "img_data": {"type": "bytes"}, + "label_data": {"type": "bytes"}} writer.add_schema(cv_schema_json, "two_images_schema") writer.write_raw_data(data) writer.commit() @@ -78,6 +81,7 @@ def test_write_two_images_mindrecord_whole_field(): if os.path.exists("{}".format(CV_FILE_NAME)): os.remove(CV_FILE_NAME) + def test_write_two_diff_shape_images_mindrecord(): """test two different shape images to mindrecord""" if os.path.exists("{}".format(CV_FILE_NAME + ".db")): @@ -96,6 +100,7 @@ def test_write_two_diff_shape_images_mindrecord(): assert os.path.exists(CV_FILE_NAME + ".db") read(CV_FILE_NAME, bytes_num) + def test_write_multi_images_mindrecord(): """test multiple images to mindrecord""" if os.path.exists("{}".format(CV_FILE_NAME + ".db")): @@ -114,6 +119,7 @@ def test_write_multi_images_mindrecord(): assert os.path.exists(CV_FILE_NAME + ".db") read(CV_FILE_NAME, bytes_num) + def test_write_two_images_and_array_mindrecord(): """test two images and array to mindrecord""" if os.path.exists("{}".format(CV_FILE_NAME + ".db")): @@ -138,7 +144,7 @@ def test_write_two_images_and_array_mindrecord(): writer.commit() assert os.path.exists(CV_FILE_NAME) assert os.path.exists(CV_FILE_NAME + ".db") - read(CV_FILE_NAME, bytes_num+2) + read(CV_FILE_NAME, bytes_num + 2) if os.path.exists("{}".format(CV_FILE_NAME + ".db")): os.remove(CV_FILE_NAME + ".db") diff --git a/tests/ut/python/mindrecord/utils.py b/tests/ut/python/mindrecord/utils.py index 3e4cf7abdf..729680cd4b 100644 --- a/tests/ut/python/mindrecord/utils.py +++ b/tests/ut/python/mindrecord/utils.py @@ -23,6 +23,7 @@ import numpy as np from mindspore import log as logger + def get_data(dir_name): """ Return raw data of imagenet dataset. @@ -55,6 +56,7 @@ def get_data(dir_name): continue return data_list + def get_two_bytes_data(file_name): """ Return raw data of two-bytes dataset.
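Every helper in this utils.py hands back a list (or generator) of dicts whose keys match the schema a test registers with FileWriter. A minimal sketch of that contract, built only from writer calls exercised in these tests (the file name and toy values are illustrative):

    data = [{"file_name": "001.jpg", "label": 43, "data": bytes([0x10])}]
    writer = FileWriter("demo.mindrecord", 1)
    writer.add_schema({"file_name": {"type": "string"},
                       "label": {"type": "int64"},
                       "data": {"type": "bytes"}}, "img_schema")
    writer.add_index(["file_name", "label"])  # enables MindPage category lookups
    writer.write_raw_data(data)
    writer.commit()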
@@ -91,6 +93,7 @@ def get_two_bytes_data(file_name): continue return data_list + def get_multi_bytes_data(file_name, bytes_num=3): """ Return raw data of multi-bytes dataset. @@ -125,6 +128,7 @@ def get_multi_bytes_data(file_name, bytes_num=3): continue return data_list + def get_mkv_data(dir_name): """ Return raw data of Vehicle_and_Person dataset. @@ -163,7 +167,7 @@ def get_mkv_data(dir_name): "id": index} data_list.append(data_json) index += 1 - logger.info('{} images are missing'.format(len(file_list)-len(data_list))) + logger.info('{} images are missing'.format(len(file_list) - len(data_list))) return data_list @@ -212,6 +216,7 @@ def get_nlp_data(dir_name, vocab_file, num): } yield data + def convert_to_uni(text): if isinstance(text, str): return text @@ -219,6 +224,7 @@ def convert_to_uni(text): return text.decode('utf-8', 'ignore') raise Exception("The type %s does not convert!" % type(text)) + def load_vocab(vocab_file): """load vocabulary to translate statement.""" vocab = collections.OrderedDict() @@ -235,11 +241,12 @@ def load_vocab(vocab_file): index += 1 return vocab + def inputs(vectors, maxlen=50): length = len(vectors) if length > maxlen: - return vectors[0:maxlen], [1]*maxlen, [0]*maxlen - input_ = vectors+[0]*(maxlen-length) - mask = [1]*length + [0]*(maxlen-length) - segment = [0]*maxlen + return vectors[0:maxlen], [1] * maxlen, [0] * maxlen + input_ = vectors + [0] * (maxlen - length) + mask = [1] * length + [0] * (maxlen - length) + segment = [0] * maxlen return input_, mask, segment diff --git a/tests/ut/python/onnx/test_onnx.py b/tests/ut/python/onnx/test_onnx.py index 9586ec86c6..b89a70c9e4 100644 --- a/tests/ut/python/onnx/test_onnx.py +++ b/tests/ut/python/onnx/test_onnx.py @@ -30,13 +30,15 @@ from mindspore.train.serialization import export context.set_context(mode=context.GRAPH_MODE) -def is_enable_onnxruntime(): - val = os.getenv("ENABLE_ONNXRUNTIME", "False") - if val in ('ON', 'on', 'TRUE', 'True', 'true'): - return True - return False - -run_on_onnxruntime = pytest.mark.skipif(not is_enable_onnxruntime(), reason="Only support running on onnxruntime") + +def is_enable_onnxruntime(): + val = os.getenv("ENABLE_ONNXRUNTIME", "False") + if val in ('ON', 'on', 'TRUE', 'True', 'true'): + return True + return False + + +run_on_onnxruntime = pytest.mark.skipif(not is_enable_onnxruntime(), reason="Only support running on onnxruntime") def setup_module(): @@ -80,6 +82,7 @@ def test_batchnorm_train_onnx_export(): os.chmod(onnx_file, stat.S_IWRITE) os.remove(onnx_file) + class LeNet5(nn.Cell): """LeNet5 definition""" @@ -207,4 +210,3 @@ def test_onnx_export_load_run(name, net, inp): assert os.path.exists(onnx_file) os.chmod(onnx_file, stat.S_IWRITE) os.remove(onnx_file) - diff --git a/tests/ut/python/ops/__init__.py b/tests/ut/python/ops/__init__.py index 5443c0ca48..9f7610e25c 100644 --- a/tests/ut/python/ops/__init__.py +++ b/tests/ut/python/ops/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" import mindspore.context as context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py index 9894525426..90856a0b39 100644 --- a/tests/ut/python/ops/test_array_ops.py +++ b/tests/ut/python/ops/test_array_ops.py @@ -108,6 +108,7 @@ def test_argmin_invalid_output_type(): with pytest.raises(TypeError): P.Argmin(-1, mstype.bool_) + class CustomOP(PrimitiveWithInfer): __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1, 
sig_dtype.T1, sig_dtype.T2, sig_dtype.T2, @@ -146,9 +147,9 @@ class CustNet1(Cell): self.float1 = 5.1 def construct(self): - x =self.op(self.t1, self.t1, self.int1, - self.float1, self.int1, self.float1, - self.t2, self.t1, self.int1) + x = self.op(self.t1, self.t1, self.int1, + self.float1, self.int1, self.float1, + self.t2, self.t1, self.int1) return x @@ -258,6 +259,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_array_ops_check.py b/tests/ut/python/ops/test_array_ops_check.py index f7b77bbb5b..1cea5979cb 100755 --- a/tests/ut/python/ops/test_array_ops_check.py +++ b/tests/ut/python/ops/test_array_ops_check.py @@ -28,10 +28,10 @@ from ..ut_filter import non_graph_engine from mindspore.common.api import _executor from ....mindspore_test_framework.mindspore_test import mindspore_test -from ....mindspore_test_framework.pipeline.forward.compile_forward\ +from ....mindspore_test_framework.pipeline.forward.compile_forward \ import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) -from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ +from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ import pipeline_for_compile_grad_ge_graph_for_case_by_case_config @@ -141,12 +141,12 @@ raise_set = [ 'block': (P.Reshape(), {'exception': TypeError, 'error_keywords': ['Reshape']}), 'desc_inputs': [5.0, (1, 2)], 'skip': ['backward']}), - # input shape is var + # input shape is var ('Reshape1', { 'block': (P.Reshape(), {'exception': TypeError, 'error_keywords': ['Reshape']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), (2, 3, 2)], 'skip': ['backward']}), - # element of shape is not int + # element of shape is not int ('Reshape3', { 'block': (ReshapeNet((2, 3.0, 2)), {'exception': TypeError, 'error_keywords': ['Reshape']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], diff --git a/tests/ut/python/ops/test_bprop_disorder.py b/tests/ut/python/ops/test_bprop_disorder.py index fad07de2cb..96ecb55d16 100644 --- a/tests/ut/python/ops/test_bprop_disorder.py +++ b/tests/ut/python/ops/test_bprop_disorder.py @@ -28,6 +28,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ class DisOrderTest1(nn.Cell): """ DisOrderTest1 definition """ + def __init__(self): super(DisOrderTest1, self).__init__() weight = Tensor(np.ones([1], np.float32)) @@ -44,6 +45,7 @@ class DisOrderTest1(nn.Cell): class DisOrderTest2(nn.Cell): """ DisOrderTest2 definition """ + def __init__(self): super(DisOrderTest2, self).__init__() weight = Tensor(np.ones([1], np.float32)) @@ -56,11 +58,12 @@ class DisOrderTest2(nn.Cell): def construct(self, x): return self.mul(x, (self.add(self.add(self.add(self.mul(self.s1, self.s2), self.mul(self.s2, self.s3)), - self.mul(self.s3, self.s4)), self.mul(self.s4, self.s1)))) + self.mul(self.s3, self.s4)), self.mul(self.s4, self.s1)))) class GradNetWrap(nn.Cell): """ GradNetWrap definition """ + def __init__(self, net): super(GradNetWrap, self).__init__() self.net = net @@ -87,6 +90,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git 
a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py index 6204bdbabb..27743656f0 100644 --- a/tests/ut/python/ops/test_control_ops.py +++ b/tests/ut/python/ops/test_control_ops.py @@ -59,10 +59,12 @@ def test_cond_data_true(): output = cond_data_test(3, 8) print("test_cond_data_true:", output) + def test_cond_data_false(): output = cond_data_test(8, 3) print("test_cond_data_false:", output) + def if_compile_test(x_init, y_init): class Net(nn.Cell): def __init__(self): @@ -368,6 +370,7 @@ def test_switch_layer(): def __init__(self): super(Layer1, self).__init__() self.z1 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z1') + def construct(self, x): return x * self.z1 @@ -375,6 +378,7 @@ def test_switch_layer(): def __init__(self): super(Layer2, self).__init__() self.z2 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z2') + def construct(self, x): return x * self.z2 @@ -383,6 +387,7 @@ def test_switch_layer(): super(SwitchLayerCell, self).__init__() self.layers = (Layer1(), Layer2()) self.z3 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z3') + def construct(self, index, x): ret = F.switch_layer(index, self.layers)(x) * self.z3 return ret @@ -390,14 +395,17 @@ def test_switch_layer(): index = Tensor(0) net = SwitchLayerCell() net(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) - C.grad_by_list(net, ParameterTuple(net.trainable_params()))(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) + C.grad_by_list(net, ParameterTuple(net.trainable_params()))(index, + Tensor(np.full([128, 96], 0.6, dtype=np.float32))) C.grad_all(net)(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) + def test_index_to_switch_layer(): class Layer1(nn.Cell): def __init__(self): super(Layer1, self).__init__() self.z1 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z1') + def construct(self, x): return x * self.z1 @@ -405,6 +413,7 @@ def test_index_to_switch_layer(): def __init__(self): super(Layer2, self).__init__() self.z2 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z2') + def construct(self, x): return x * self.z2 @@ -413,6 +422,7 @@ def test_index_to_switch_layer(): super(SwitchLayerCell, self).__init__() self.layers = (Layer1(), Layer2()) self.z3 = Parameter(Tensor(np.full([128, 96], 0.6, dtype=np.float32)), name='z3') + def construct(self, index, x): ret = self.layers[index](x) * self.z3 return ret @@ -420,5 +430,6 @@ def test_index_to_switch_layer(): index = Tensor(0) net = SwitchLayerCell() net(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) - C.grad_by_list(net, ParameterTuple(net.trainable_params()))(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) + C.grad_by_list(net, ParameterTuple(net.trainable_params()))(index, + Tensor(np.full([128, 96], 0.6, dtype=np.float32))) C.grad_all(net)(index, Tensor(np.full([128, 96], 0.6, dtype=np.float32))) diff --git a/tests/ut/python/ops/test_list.py b/tests/ut/python/ops/test_list.py index a961148ca8..1f44b783d5 100644 --- a/tests/ut/python/ops/test_list.py +++ b/tests/ut/python/ops/test_list.py @@ -212,6 +212,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py index 7b230e643b..2fef51d913 100755 --- 
a/tests/ut/python/ops/test_math_ops.py +++ b/tests/ut/python/ops/test_math_ops.py @@ -341,6 +341,7 @@ class SignNet(nn.Cell): def construct(self, x): return self.sign(x) + class AssignAdd(nn.Cell): def __init__(self): super().__init__() @@ -351,6 +352,7 @@ class AssignAdd(nn.Cell): self.inputdata = input_ return self.op(self.inputdata, input_) + class FloorNet(nn.Cell): def __init__(self): super(FloorNet, self).__init__() @@ -359,6 +361,7 @@ class FloorNet(nn.Cell): def construct(self, x): return self.floor(x) + class Log1pNet(nn.Cell): def __init__(self): super(Log1pNet, self).__init__() diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py index 86e2480631..9521f18031 100755 --- a/tests/ut/python/ops/test_math_ops_check.py +++ b/tests/ut/python/ops/test_math_ops_check.py @@ -28,15 +28,15 @@ from ..ut_filter import non_graph_engine from mindspore.common.api import _executor from ....mindspore_test_framework.mindspore_test import mindspore_test -from ....mindspore_test_framework.pipeline.forward.compile_forward\ +from ....mindspore_test_framework.pipeline.forward.compile_forward \ import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) -from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ +from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ import pipeline_for_compile_grad_ge_graph_for_case_by_case_config class AssignAddNet(nn.Cell): - def __init__(self,): + def __init__(self, ): super(AssignAddNet, self).__init__() self.op = P.AssignAdd() self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1") @@ -47,7 +47,7 @@ class AssignAddNet(nn.Cell): class AssignSubNet(nn.Cell): - def __init__(self,): + def __init__(self, ): super(AssignSubNet, self).__init__() self.op = P.AssignSub() self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1") @@ -113,156 +113,156 @@ raise_set = [ # type of axis is float, not int ('ReduceMean1', { 'block': (ReduceNet(P.ReduceMean, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceMean']}), + {'exception': TypeError, 'error_keywords': ['ReduceMean']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # axis is out of range ('ReduceMean2', { 'block': (ReduceNet(P.ReduceMean, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceMean']}), + {'exception': ValueError, 'error_keywords': ['ReduceMean']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # type of axis is float, not int ('ReduceSum1', { 'block': (ReduceNet(P.ReduceSum, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceSum']}), + {'exception': TypeError, 'error_keywords': ['ReduceSum']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # axis is out of range ('ReduceSum2', { 'block': (ReduceNet(P.ReduceSum, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceSum']}), + {'exception': ValueError, 'error_keywords': ['ReduceSum']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # type of axis is float, not int ('ReduceAll1', { 'block': (ReduceNet(P.ReduceAll, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceAll']}), + {'exception': TypeError, 'error_keywords': 
['ReduceAll']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool_))], 'skip': ['backward']}), # axis is out of range ('ReduceAll2', { 'block': (ReduceNet(P.ReduceAll, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceAll']}), + {'exception': ValueError, 'error_keywords': ['ReduceAll']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool_))], 'skip': ['backward']}), # type of axis is float, not int ('ReduceMax1', { 'block': (ReduceNet(P.ReduceMax, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceMax']}), + {'exception': TypeError, 'error_keywords': ['ReduceMax']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # axis is out of range ('ReduceMax2', { 'block': (ReduceNet(P.ReduceMax, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceMax']}), + {'exception': ValueError, 'error_keywords': ['ReduceMax']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # type of axis is float, not int ('ReduceMin1', { 'block': (ReduceNet(P.ReduceMin, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceMin']}), + {'exception': TypeError, 'error_keywords': ['ReduceMin']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # axis is out of range ('ReduceMin2', { 'block': (ReduceNet(P.ReduceMin, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceMin']}), + {'exception': ValueError, 'error_keywords': ['ReduceMin']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # type of axis is float, not int ('ReduceProd1', { 'block': (ReduceNet(P.ReduceProd, keep_dims=True, axis=5.0), - {'exception': TypeError, 'error_keywords': ['ReduceProd']}), + {'exception': TypeError, 'error_keywords': ['ReduceProd']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # axis is out of range ('ReduceProd2', { 'block': (ReduceNet(P.ReduceProd, keep_dims=True, axis=5), - {'exception': ValueError, 'error_keywords': ['ReduceProd']}), + {'exception': ValueError, 'error_keywords': ['ReduceProd']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], 'skip': ['backward']}), # type of x is Tensor(bool) ('CumProd1', { 'block': (CumProdNet(), - {'exception': TypeError, 'error_keywords': ['CumProd']}), + {'exception': TypeError, 'error_keywords': ['CumProd']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool)), 1], 'skip': ['backward']}), # type of axis is float, not int ('CumProd2', { 'block': (CumProdNet(), - {'exception': TypeError, 'error_keywords': ['CumProd']}), + {'exception': TypeError, 'error_keywords': ['CumProd']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32)), 5.0], 'skip': ['backward']}), # type of x and y are Tensor(uint32) ('MatMul1', { 'block': (P.MatMul(), - {'exception': TypeError, 'error_keywords': ['MatMul']}), + {'exception': TypeError, 'error_keywords': ['MatMul']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.uint32)), Tensor(np.ones([3, 2]).astype(np.uint32))], 'skip': ['backward']}), # type of x and y not match ('MatMul2', { 'block': (P.MatMul(), - {'exception': TypeError, 'error_keywords': ['MatMul']}), + {'exception': TypeError, 'error_keywords': ['MatMul']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.int32))], 'skip': ['backward']}), # shape of x and y not match ('MatMul3', {
'block': (P.MatMul(), - {'exception': ValueError, 'error_keywords': ['MatMul']}), + {'exception': ValueError, 'error_keywords': ['MatMul']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([2, 3]).astype(np.float32))], 'skip': ['backward']}), # dims of x and y are less than 3 ('BatchMatMul1', { 'block': (P.BatchMatMul(), - {'exception': ValueError, 'error_keywords': ['BatchMatMul']}), + {'exception': ValueError, 'error_keywords': ['BatchMatMul']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32)), Tensor(np.ones([3, 2]).astype(np.int32))], 'skip': ['backward']}), # type of x is Tensor(bool) ('CumSum1', { 'block': (CumSumNet(axis=1), - {'exception': TypeError, 'error_keywords': ['CumSum']}), + {'exception': TypeError, 'error_keywords': ['CumSum']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool))], 'skip': ['backward']}), # type of axis is float, not int ('CumSum2', { 'block': (CumSumNet(axis=1.0), - {'exception': TypeError, 'error_keywords': ['CumSum']}), + {'exception': TypeError, 'error_keywords': ['CumSum']}), 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool))], 'skip': ['backward']}), # input is not tuple or list ('AddN1', { 'block': (P.AddN(), - {'exception': TypeError, 'error_keywords': ['AddN']}), + {'exception': TypeError, 'error_keywords': ['AddN']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.uint32))], 'skip': ['backward']}), # type not match ('AddN2', { 'block': (P.AddN(), - {'exception': TypeError, 'error_keywords': ['AddN']}), + {'exception': TypeError, 'error_keywords': ['AddN']}), 'desc_inputs': [(Tensor(np.ones([2, 3]).astype(np.uint32)), Tensor(np.ones([3, 2]).astype(np.int32)))], 'skip': ['backward']}), # shape not match ('AddN3', { 'block': (P.AddN(), - {'exception': ValueError, 'error_keywords': ['AddN']}), + {'exception': ValueError, 'error_keywords': ['AddN']}), 'desc_inputs': [(Tensor(np.ones([2, 3]).astype(np.int32)), Tensor(np.ones([3, 2]).astype(np.int32)))], 'skip': ['backward']}), # input is Tensor(bool) ('Neg1', { 'block': (P.Neg(), - {'exception': TypeError, 'error_keywords': ['Neg']}), + {'exception': TypeError, 'error_keywords': ['Neg']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], 'skip': ['backward']}), @@ -291,49 +291,49 @@ raise_set = [ # input is Tensor(bool) ('Square1', { 'block': (P.Square(), - {'exception': TypeError, 'error_keywords': ['Square']}), + {'exception': TypeError, 'error_keywords': ['Square']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], 'skip': ['backward']}), # input is Tensor(bool) ('Rsqrt1', { 'block': (P.Rsqrt(), - {'exception': TypeError, 'error_keywords': ['Rsqrt']}), + {'exception': TypeError, 'error_keywords': ['Rsqrt']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], 'skip': ['backward']}), # input is Tensor(bool) ('Sqrt1', { 'block': (P.Sqrt(), - {'exception': TypeError, 'error_keywords': ['Sqrt']}), + {'exception': TypeError, 'error_keywords': ['Sqrt']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], 'skip': ['backward']}), # input is not Tensor ('Reciprocal1', { 'block': (P.Reciprocal(), - {'exception': TypeError, 'error_keywords': ['Reciprocal']}), + {'exception': TypeError, 'error_keywords': ['Reciprocal']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input x is Tensor(bool) ('Pow1', { 'block': (P.Pow(), - {'exception': TypeError, 'error_keywords': ['Pow']}), + {'exception': TypeError, 'error_keywords': ['Pow']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_)), 2.0], 'skip': ['backward']}),
# input is not Tensor ('Exp1', { 'block': (P.Exp(), - {'exception': TypeError, 'error_keywords': ['Exp']}), + {'exception': TypeError, 'error_keywords': ['Exp']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is not Tensor ('Log1', { 'block': (P.Log(), - {'exception': TypeError, 'error_keywords': ['Log']}), + {'exception': TypeError, 'error_keywords': ['Log']}), 'desc_inputs': [5.0], 'skip': ['backward']}), @@ -395,7 +395,7 @@ raise_set = [ # input x is Tensor(int32), not Tensor(float) ('Floor1', { 'block': (P.Floor(), - {'exception': TypeError, 'error_keywords': ['Floor']}), + {'exception': TypeError, 'error_keywords': ['Floor']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], 'skip': ['backward']}), @@ -413,7 +413,7 @@ raise_set = [ # input x is Tensor(bool), not Tensor(float) ('Acosh1', { 'block': (P.Acosh(), - {'exception': TypeError, 'error_keywords': ['Acosh']}), + {'exception': TypeError, 'error_keywords': ['Acosh']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], 'skip': ['backward']}), @@ -498,7 +498,7 @@ raise_set = [ # input x is not Tensor(bool) ('LogicalNot1', { 'block': (P.LogicalNot(), - {'exception': TypeError, 'error_keywords': ['LogicalNot']}), + {'exception': TypeError, 'error_keywords': ['LogicalNot']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], 'skip': ['backward']}), diff --git a/tests/ut/python/ops/test_momentum.py b/tests/ut/python/ops/test_momentum.py index 28b9637015..f86f6e7d97 100644 --- a/tests/ut/python/ops/test_momentum.py +++ b/tests/ut/python/ops/test_momentum.py @@ -24,6 +24,7 @@ from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config + # pylint: disable=W0613 # W0613: unused-argument @@ -45,6 +46,7 @@ def tensor_run_opt(opt, iters, learning_rate, momentum, class OptimizerByMomentum(nn.Cell): """ OptimizerByMomentum definition """ + def __init__(self, weights): super(OptimizerByMomentum, self).__init__() self.learning_rate = Parameter(0.1, name="learning_rate") @@ -69,6 +71,7 @@ class OptimizerByMomentum(nn.Cell): class TrainStepWrap(nn.Cell): """ TrainStepWrap definition """ + def __init__(self, network): super(TrainStepWrap, self).__init__() self.network = network @@ -84,6 +87,7 @@ class TrainStepWrap(nn.Cell): class NetWithLossClass(nn.Cell): """ NetWithLossClass definition """ + def __init__(self, network): super(NetWithLossClass, self).__init__(auto_prefix=False) self.loss = nn.SoftmaxCrossEntropyWithLogits() @@ -96,6 +100,7 @@ class NetWithLossClass(nn.Cell): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") @@ -122,6 +127,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_multitype_ops.py b/tests/ut/python/ops/test_multitype_ops.py index 588143fb19..c640501c9a 100644 --- a/tests/ut/python/ops/test_multitype_ops.py +++ b/tests/ut/python/ops/test_multitype_ops.py @@ -26,6 +26,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ class ScalarAddScalar(nn.Cell): """ ScalarAddScalar definition """ + def __init__(self, ): super(ScalarAddScalar,
self).__init__() self.n1 = 1.2 @@ -37,6 +38,7 @@ class ScalarAddScalar(nn.Cell): class ScalarAddTensor1(nn.Cell): """ ScalarAddTensor1 definition """ + def __init__(self, ): super(ScalarAddTensor1, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -49,6 +51,7 @@ class ScalarAddTensor1(nn.Cell): class ScalarAddTensor2(nn.Cell): """ ScalarAddTensor2 definition """ + def __init__(self, ): super(ScalarAddTensor2, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -61,6 +64,7 @@ class ScalarAddTensor2(nn.Cell): class TensorAddScalar(nn.Cell): """ TensorAddScalar definition """ + def __init__(self, ): super(TensorAddScalar, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -72,6 +76,7 @@ class TensorAddScalar(nn.Cell): class ScalarTensorSub(nn.Cell): """ ScalarTensorSub definition """ + def __init__(self, ): super(ScalarTensorSub, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -87,6 +92,7 @@ class ScalarTensorSub(nn.Cell): class ScalarTensorMul(nn.Cell): """ ScalarTensorMul definition """ + def __init__(self, ): super(ScalarTensorMul, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -102,6 +108,7 @@ class ScalarTensorMul(nn.Cell): class ScalarTensorDiv(nn.Cell): """ ScalarTensorDiv definition """ + def __init__(self, ): super(ScalarTensorDiv, self).__init__() self.t1 = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -279,6 +286,7 @@ test_exec_case = reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index a245f7b537..c9746128a2 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -479,7 +479,7 @@ test_cases = [ }), ('HistogramSummary', { 'block': HistogramSummaryNet(), - 'desc_inputs': [[1,2,3]], + 'desc_inputs': [[1, 2, 3]], }), ('FusedBatchNormGrad', { 'block': FusedBatchNormGrad(nn.BatchNorm2d(num_features=512, eps=1e-5, momentum=0.1)), @@ -539,7 +539,7 @@ test_cases = [ 'desc_bprop': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))], 'skip': ['backward']}), ('ReduceLogSumExp', { - 'block': nn.ReduceLogSumExp((0, ), False), + 'block': nn.ReduceLogSumExp((0,), False), 'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))], 'desc_bprop': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))], 'skip': ['backward']}), diff --git a/tests/ut/python/ops/test_nn_ops_check.py b/tests/ut/python/ops/test_nn_ops_check.py index 4060bb2e15..8f7f04b078 100755 --- a/tests/ut/python/ops/test_nn_ops_check.py +++ b/tests/ut/python/ops/test_nn_ops_check.py @@ -28,10 +28,10 @@ from ..ut_filter import non_graph_engine from mindspore.common.api import _executor from ....mindspore_test_framework.mindspore_test import mindspore_test -from ....mindspore_test_framework.pipeline.forward.compile_forward\ +from ....mindspore_test_framework.pipeline.forward.compile_forward \ import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) -from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ +from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ import pipeline_for_compile_grad_ge_graph_for_case_by_case_config @@ -209,72 +209,79 @@ raise_set = [ # rank of x is not 4 ('Conv2D3', { 'block': (P.Conv2D(2, (5, 5)), {'exception': 
ValueError, 'error_keywords': ['Conv2D']}), - 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1, 1, 9, 9]).astype(np.float32))], 'skip': ['backward']}), # rank of w is not 4 ('Conv2D4', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([1, 1, 9]).astype(np.float32))], 'skip': ['backward']}), # x_shape[1] / group != w_shape[1] ('Conv2D5', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([1, 2, 9, 9]).astype(np.float32))], 'skip': ['backward']}), # out_channel != w_shape[0] ('Conv2D6', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([1, 1, 9, 9]).astype(np.float32))], 'skip': ['backward']}), # kernel_size != w_shape[2:4] ('Conv2D7', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([2, 1, 5, 6]).astype(np.float32))], 'skip': ['backward']}), - + # input is scalar ('DepthwiseConv2dNative0', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), + {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('DepthwiseConv2dNative1', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), + {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # input x and w type mismatch ('DepthwiseConv2dNative2', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), + {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))], 'skip': ['backward']}), # rank of x is not 4 ('DepthwiseConv2dNative3', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), - 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], + {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), + 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1, 1, 9, 9]).astype(np.float32))], 'skip': ['backward']}), # rank of w is not 4 ('DepthwiseConv2dNative4', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), -
'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))], + {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([1, 1, 9]).astype(np.float32))], 'skip': ['backward']}), # x_shape[1] != w_shape[1] ('DepthwiseConv2dNative5', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))], + {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([1, 2, 9, 9]).astype(np.float32))], 'skip': ['backward']}), # kernel_size != w_shape[2:4] ('DepthwiseConv2dNative6', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), - {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), - 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))], + {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), + 'desc_inputs': [Tensor(np.ones([1, 1, 9, 9]).astype(np.float32)), + Tensor(np.ones([2, 1, 5, 6]).astype(np.float32))], 'skip': ['backward']}), - + # input is scalar ('MaxPoolWithArgmax0', { 'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}), @@ -288,12 +295,12 @@ raise_set = [ # rank of x is not 4 ('MaxPoolWithArgmax2', { 'block': (P.MaxPoolWithArgmax(), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}), - 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32]).astype(np.float32))], 'skip': ['backward']}), # kernel size is invalid(very large) ('MaxPoolWithArgmax3', { 'block': (P.MaxPoolWithArgmax(ksize=50), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}), - 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar @@ -304,12 +311,12 @@ raise_set = [ # rank of x is not 4 ('MaxPool1', { 'block': (P.MaxPool(), {'exception': ValueError, 'error_keywords': ['MaxPool']}), - 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32]).astype(np.float32))], 'skip': ['backward']}), # kernel size is invalid(very large) ('MaxPool2', { 'block': (P.MaxPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}), - 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar @@ -320,42 +327,42 @@ raise_set = [ # rank of x is not 4 ('AvgPool1', { 'block': (P.AvgPool(), {'exception': ValueError, 'error_keywords': ['AvgPool']}), - 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32]).astype(np.float32))], 'skip': ['backward']}), # kernel size is invalid(very large) ('AvgPool2', { 'block': (P.AvgPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}), - 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], + 'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('Conv2DBackpropInput0', { - 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), -
{'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), + 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3)), + {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('Conv2DBackpropInput1', { - 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), - {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), + 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3)), + {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # types of doutput and w mismatch ('Conv2DBackpropInput2', { - 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), - {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), + 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3)), + {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # type of x_size is not tuple ('Conv2DBackpropInput3', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), 2), - {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), + {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # type of x_size is not tuple(int,...) ('Conv2DBackpropInput4', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3.0)), - {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), + {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), @@ -409,50 +416,50 @@ raise_set = [ # input is scalar ('SoftmaxCrossEntropyWithLogits0', { 'block': (P.SoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('SoftmaxCrossEntropyWithLogits1', { 'block': (P.SoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # types of logits and labels mismatch ('SoftmaxCrossEntropyWithLogits2', { 'block': (P.SoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float16)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # shapes of logits and labels mismatch ('SoftmaxCrossEntropyWithLogits3', { 'block': (P.SoftmaxCrossEntropyWithLogits(), - {'exception': ValueError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), + {'exception': ValueError, 'error_keywords': ['SoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32))],
'skip': ['backward']}), # input is scalar ('SparseSoftmaxCrossEntropyWithLogits0', { 'block': (P.SparseSoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # logits is Tensor(bool) ('SparseSoftmaxCrossEntropyWithLogits1', { 'block': (P.SparseSoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # labels is Tensor(bool) ('SparseSoftmaxCrossEntropyWithLogits2', { 'block': (P.SparseSoftmaxCrossEntropyWithLogits(), - {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), + {'exception': TypeError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # logits_shape[0] != labels_shape[0] ('SparseSoftmaxCrossEntropyWithLogits3', { 'block': (P.SparseSoftmaxCrossEntropyWithLogits(), - {'exception': ValueError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), + {'exception': ValueError, 'error_keywords': ['SparseSoftmaxCrossEntropyWithLogits']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([3]).astype(np.int32))], 'skip': ['backward']}), ] diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 580bfaa83d..4793d1fca9 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -185,6 +185,7 @@ class ScatterMax(nn.Cell): out = self.scatter_max(self.ref, indices, updates) return out + class ApplyFtrlNet(nn.Cell): def __init__(self): super(ApplyFtrlNet, self).__init__() @@ -944,7 +945,7 @@ test_case_nn_ops = [ ('ROIAlign', { 'block': P.ROIAlign(7, 7, 0.03125, 2), 'desc_inputs': [[2, 256, 192, 320], [1024, 5]], - 'desc_bprop': [[7,7]]}), + 'desc_bprop': [[7, 7]]}), ('ROIAlignGrad', { 'block': G.ROIAlignGrad((1, 1, 1, 1), 2, 2, 0.5, 2), 'desc_inputs': [[1, 1, 2, 2], [1, 5]], @@ -957,7 +958,7 @@ test_case_nn_ops = [ 'desc_bprop': [3, 3], 'skip': ['backward']}), ('SGD', { - 'block': P.SGD(0.0, 0.0, False), + 'block': P.SGD(0.0, 0.0, False), 'desc_inputs': [[3, 3], [3, 3], Tensor(0.001, mstype.float32), [3, 3], Tensor(0.1, mstype.float32), [3, 3]], 'desc_bprop': [3, 3], 'skip': ['backward']}), diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py index aa379cc64e..0a4a2fb5e2 100644 --- a/tests/ut/python/ops/test_ops_check.py +++ b/tests/ut/python/ops/test_ops_check.py @@ -27,11 +27,13 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config from ....mindspore_test_framework.pipeline.forward.verify_exception \ import pipeline_for_verify_exception_for_case_by_case_config + logging.basicConfig(level=logging.WARNING) class NetMissConstruct(nn.Cell): """ NetMissConstruct definition """ + def __init__(self): super(NetMissConstruct, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') @@ -70,6 +72,7 @@ def test_net_without_construct(): class NetWithRaise(nn.Cell): """ NetWithRaise definition """ + def __init__(self): super(NetWithRaise, self).__init__() 
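All of the raise_set and test_case tables in this patch follow the same case-by-case convention: an entry names a block (optionally paired with an expected exception and its keywords), the inputs to feed it, and the phases to skip, and a decorated collector returns the table to the pipeline. A minimal sketch, reusing the Neg1 entry that appears verbatim earlier (the collector name here is illustrative):

    raise_set = [
        ('Neg1', {
            'block': (P.Neg(), {'exception': TypeError, 'error_keywords': ['Neg']}),
            'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
            'skip': ['backward']}),
    ]

    @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
    def test_check_exception():
        return raise_set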
self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') @@ -94,6 +97,7 @@ def test_net_with_raise(): class NetAddN(nn.Cell): """net for test AddN""" + def __init__(self): super(NetAddN, self).__init__() self.net = P.AddN() @@ -104,6 +108,7 @@ class NetAddN(nn.Cell): class NetSplit(nn.Cell): "net for test Split" + def __init__(self): super(NetSplit, self).__init__() self.net = P.Split(1, 2) @@ -114,6 +119,7 @@ class NetSplit(nn.Cell): class NetBatchMatMul(nn.Cell): """net for test BatchMatMul""" + def __init__(self): super(NetBatchMatMul, self).__init__() self.op = P.BatchMatMul() @@ -215,6 +221,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): diff --git a/tests/ut/python/ops/test_ops_reid.py b/tests/ut/python/ops/test_ops_reid.py index c1176808a8..57a595eca9 100644 --- a/tests/ut/python/ops/test_ops_reid.py +++ b/tests/ut/python/ops/test_ops_reid.py @@ -21,14 +21,15 @@ import mindspore.nn as nn from ....ops_common import convert from ....mindspore_test_framework.mindspore_test import mindspore_test -from ....mindspore_test_framework.pipeline.forward.compile_forward\ -import pipeline_for_compile_forward_ge_graph_for_case_by_case_config -from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ -import pipeline_for_compile_grad_ge_graph_for_case_by_case_config +from ....mindspore_test_framework.pipeline.forward.compile_forward \ + import pipeline_for_compile_forward_ge_graph_for_case_by_case_config +from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ + import pipeline_for_compile_grad_ge_graph_for_case_by_case_config class SeqConvBnRelu(nn.Cell): """ SeqConvBnRelu definition """ + def __init__(self, in_ch, out_ch): super(SeqConvBnRelu, self).__init__() self.conv = nn.Conv2d(in_ch, out_ch, 3) @@ -45,13 +46,13 @@ test_case_reid_ops = [ 'desc_const': [(1,)], 'desc_inputs': [convert([32, 32], np.float16)], 'desc_bprop': [convert([32], np.float16)], - 'skip':[]}), + 'skip': []}), ('ReduceMin', { 'block': P.ReduceMin(), 'desc_const': [(1,)], 'desc_inputs': [[32, 32]], 'desc_bprop': [[32]], - 'skip':[]}), + 'skip': []}), ('ReduceMean', { 'block': P.ReduceMean(keep_dims=True), 'desc_const': [(1, 2)], @@ -61,12 +62,12 @@ test_case_reid_ops = [ 'block': P.Log(), 'desc_inputs': [[4, 128, 1024]], 'desc_bprop': [[4, 128, 1024]], - 'skip':['backward']}), # check backward error + 'skip': ['backward']}), # check backward error ('Reciprocal', { 'block': P.Reciprocal(), 'desc_inputs': [[4, 128, 1024]], 'desc_bprop': [[4, 128, 1024]], - 'skip':['backward']}), + 'skip': ['backward']}), ('FloorDiv', { 'block': P.FloorDiv(), 'desc_inputs': [[4, 128, 1024], [4, 128, 1024]], @@ -79,12 +80,12 @@ test_case_reid_ops = [ 'block': P.Softmax(), 'desc_inputs': [[1, 16]], 'desc_bprop': [[1, 16]], - 'skip':['backward']}), # check backward error + 'skip': ['backward']}), # check backward error ('Softmax', { 'block': P.Softmax(axis=(0, 1)), 'desc_inputs': [[1, 16]], 'desc_bprop': [[1, 16]], - 'skip':['backward']}), + 'skip': ['backward']}), ('L2Normalize', { 'block': P.L2Normalize(), 'desc_inputs': [[4, 128, 1024]], @@ -103,7 +104,7 @@ test_case_reid_ops = [ 'desc_bprop': [[128, 64, 112, 112]]}), ('PRelu', { 'block': P.PReLU(), - 'desc_inputs': [[128, 64, 112, 112], [64,]], + 'desc_inputs': [[128, 64, 112, 112], [64, ]], 'desc_bprop': [[128, 64, 112, 112]]}), ('Cos', { 'block': P.Cos(), @@ -137,7 +138,7 @@ test_case_reid_ops 
= [ ('Dropout', { 'block': nn.Dropout(), 'desc_inputs': [[1, 512, 7, 7]], - 'desc_bprop': [[1, 512, 7, 7]]}), # A scalar in the inputs caused a segfault in the plugin. + 'desc_bprop': [[1, 512, 7, 7]]}), # A scalar in the inputs caused a segfault in the plugin. ('MatMul', { 'block': P.MatMul(), 'desc_inputs': [[64, 512], [512, 64]], # fp16 does not work; quite problematic. @@ -155,16 +156,18 @@ test_case = functools.reduce(lambda x, y: x + y, test_case_lists) test_exec_case = filter(lambda x: 'skip' not in x[1] or - 'exec' not in x[1]['skip'], test_case) + 'exec' not in x[1]['skip'], test_case) test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or - 'backward' not in x[1]['skip'] and 'backward_exec' - not in x[1]['skip'], test_case) + 'backward' not in x[1]['skip'] and 'backward_exec' + not in x[1]['skip'], test_case) + @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): return test_exec_case + @mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config) def test_backward_exec(): return test_backward_exec_case diff --git a/tests/ut/python/ops/test_python_operators.py b/tests/ut/python/ops/test_python_operators.py index 705774068d..9e9877e6c9 100644 --- a/tests/ut/python/ops/test_python_operators.py +++ b/tests/ut/python/ops/test_python_operators.py @@ -75,7 +75,7 @@ class ScalarCompareNet(nn.Cell): else: t = t + x a = 32.0 * 12 - b = 12/3.0 + b = 12 / 3.0 if a > b: t = t * x else: diff --git a/tests/ut/python/ops/test_tensor_slice.py b/tests/ut/python/ops/test_tensor_slice.py index 32c4025368..5d0c76b447 100644 --- a/tests/ut/python/ops/test_tensor_slice.py +++ b/tests/ut/python/ops/test_tensor_slice.py @@ -100,7 +100,7 @@ class TensorAssignWithSliceError1(Cell): super(TensorAssignWithSliceError1, self).__init__() def construct(self, a, b): - a[1:3:-1,::] = b + a[1:3:-1, ::] = b return a @@ -134,14 +134,14 @@ class TensorAssignWithSlice(Cell): self.c = 2 def construct(self, a, b, ck): - a[1:3,::] = b - a[2:3:,3:] = b + a[1:3, ::] = b + a[2:3:, 3:] = b a[::] = b a[::] = self.c - a[::,::] = b - a[::,::] = self.c - a[2:3:,0:, 4:1:-1] = b - a[2:3:,0:, 4:1:-1] = self.c + a[::, ::] = b + a[::, ::] = self.c + a[2:3:, 0:, 4:1:-1] = b + a[2:3:, 0:, 4:1:-1] = self.c z = a + ck return z @@ -149,18 +149,18 @@ class TensorAssignWithSlice(Cell): def test_tensor_assign(): context.set_context(mode=context.GRAPH_MODE, save_graphs=True) net = TensorAssignWithSlice() - net2= TensorAssignWithSlice2() + net2 = TensorAssignWithSlice2() net_e1 = TensorAssignWithSliceError1() net_e2 = TensorAssignWithSliceError2() - a = np.arange(60).reshape(3,4,5) - ck = np.arange(60).reshape(3,4,5) + a = np.arange(60).reshape(3, 4, 5) + ck = np.arange(60).reshape(3, 4, 5) b = Tensor([1], dtype=mstype.float32) Ta = Tensor(a, dtype=mstype.float32) Tck = Tensor(ck, dtype=mstype.float32) - Ta4d = Tensor(a.reshape(1,3,4,5), dtype=mstype.float32) - Ta4d_ck = Tensor(ck.reshape(1,3,4,5), dtype=mstype.float32) - Tb= Tensor([1,3], dtype=mstype.float32) - Tc= Tensor([], dtype=mstype.float32) + Ta4d = Tensor(a.reshape(1, 3, 4, 5), dtype=mstype.float32) + Ta4d_ck = Tensor(ck.reshape(1, 3, 4, 5), dtype=mstype.float32) + Tb = Tensor([1, 3], dtype=mstype.float32) + Tc = Tensor([], dtype=mstype.float32) t = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32) tck = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32) net(Ta, b, Tck) @@ -219,15 +219,15 @@ def test_tensor_assign(): with pytest.raises(IndexError): net(Ta4d, b, Ta4d_ck) - #Error for A[...] = U or A[1:, ...] = u - #1. A[...] = scalar/tensor + # Error for A[...] = U or A[1:, ...] = u + # 1. A[...]
= scalar/tensor net = TensorAssignWithEllipsis() net(Ta, Ta4d) with pytest.raises(ValueError): net(Ta, Tc) with pytest.raises(ValueError): net(Ta, Tb) - #2. A[::, 1:, ...] = scalar/tensor + # 2. A[::, 1:, ...] = scalar/tensor net = TensorAssignWithTupleEllipsis() net(Ta, b) with pytest.raises(ValueError): @@ -239,6 +239,7 @@ def test_tensor_assign(): class TensorAssignWithTupleEllipsis2(Cell): def __init__(self): super(TensorAssignWithTupleEllipsis2, self).__init__() + def construct(self, a, b): a[1:, ..., ::] = b return a @@ -247,6 +248,7 @@ class TensorAssignWithTupleEllipsis2(Cell): class TensorAssignWithTupleEllipsis(Cell): def __init__(self): super(TensorAssignWithTupleEllipsis, self).__init__() + def construct(self, a, b): a[:2, ...] = 1 a[1:, ...] = b @@ -256,6 +258,7 @@ class TensorAssignWithTupleEllipsis(Cell): class TensorAssignWithEllipsis(Cell): def __init__(self): super(TensorAssignWithEllipsis, self).__init__() + def construct(self, a, b): a[...] = 1 a[...] = b @@ -272,6 +275,7 @@ class TensorAssignWithInteger(Cell): z = a + ck return z + class TensorAssignWithTupleInteger(Cell): def __init__(self): super(TensorAssignWithTupleInteger, self).__init__() @@ -279,15 +283,16 @@ class TensorAssignWithTupleInteger(Cell): def construct(self, a, b, ck): a[(1)] = 1 a[(1)] = b - a[(1,1)] = b - a[(1,1)] = 1 + a[(1, 1)] = b + a[(1, 1)] = 1 z = a + ck return z + class TensorAssignWithBoolTensorIndex(Cell): def __init__(self): super(TensorAssignWithBoolTensorIndex, self).__init__() - self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float32) + self.t = Tensor(np.arange(60).reshape([3, 4, 5]), dtype=mstype.float32) self.u_scalar = 5 def construct(self, a, b, c, u_tensor): @@ -310,7 +315,7 @@ class TensorAssignWithBoolTensorIndex2(Cell): def __init__(self): super(TensorAssignWithBoolTensorIndex2, self).__init__() self.t = Tensor(np.arange(6).reshape([2, 3]), dtype=mstype.float32) - self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float32) + self.t = Tensor(np.arange(60).reshape([3, 4, 5]), dtype=mstype.float32) self.u_scalar = 5 def construct(self, a, u_tensor): @@ -349,6 +354,7 @@ t_1d = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32) tck_1d = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32) u_scalar = 5 + def test_tensor_assign_bool_index(): net1 = TensorAssignWithBoolTensorIndex() net2 = TensorAssignWithBoolTensorIndex2() @@ -378,34 +384,35 @@ def test_tensor_assign_bool_index(): with pytest.raises(AttributeError): net4(Ta, u_scalar) + test_cases = [ ('TensorAssignWithTupleEllipsis2', { 'block': TensorAssignWithTupleEllipsis2(), - 'desc_inputs': [Ta4, u_tensor], + 'desc_inputs': [Ta4, u_tensor], }), ('TensorAssignWithTupleEllipsis', { 'block': TensorAssignWithTupleEllipsis(), - 'desc_inputs': [Ta, u_tensor], + 'desc_inputs': [Ta, u_tensor], }), ('TensorAssignWithEllipsis', { 'block': TensorAssignWithEllipsis(), - 'desc_inputs': [Ta, u_tensor], + 'desc_inputs': [Ta, u_tensor], }), ('TensorAssignWithTupleInteger', { 'block': TensorAssignWithTupleInteger(), - 'desc_inputs': [Ta, u_tensor, Tck], + 'desc_inputs': [Ta, u_tensor, Tck], }), ('TensorAssignWithInteger', { 'block': TensorAssignWithInteger(), - 'desc_inputs': [Ta, u_tensor, Tck], + 'desc_inputs': [Ta, u_tensor, Tck], }), ('TensorAssignWithSlice', { 'block': TensorAssignWithSlice(), - 'desc_inputs': [Ta, u_tensor, Tck], + 'desc_inputs': [Ta, u_tensor, Tck], }), ('TensorAssignWithSlice2', { 'block': TensorAssignWithSlice2(), - 'desc_inputs': [t_1d, u_tensor, tck_1d], + 'desc_inputs': [t_1d, 
u_tensor, tck_1d], }), ('TensorAssignWithBoolTensorIndex', { 'block': TensorAssignWithBoolTensorIndex(), @@ -458,7 +465,8 @@ def test_tensor_slice_reduce_out_of_bounds_neg(): net = NetWork() with pytest.raises(ValueError) as ex: net(input_tensor) - assert "For 'StridedSlice' the `begin[0]` should be an int and must greater or equal to -6, but got `-7`" in str(ex.value) + assert "For 'StridedSlice' the `begin[0]` should be an int and must greater or equal to -6, but got `-7`" in str( + ex.value) def test_tensor_slice_reduce_out_of_bounds_positive(): diff --git a/tests/ut/python/ops/test_tuple.py b/tests/ut/python/ops/test_tuple.py index 5b3d5d52ae..f0cd7fc59b 100644 --- a/tests/ut/python/ops/test_tuple.py +++ b/tests/ut/python/ops/test_tuple.py @@ -24,6 +24,7 @@ from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) @@ -63,6 +64,8 @@ test_case_ops = [ test_case_lists = [test_case_ops] test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) + + # use -k to select certain test cases # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm diff --git a/tests/ut/python/ops/test_tuple_slice.py b/tests/ut/python/ops/test_tuple_slice.py index ea5112995c..2c0bde7eef 100644 --- a/tests/ut/python/ops/test_tuple_slice.py +++ b/tests/ut/python/ops/test_tuple_slice.py @@ -29,6 +29,7 @@ from ....mindspore_test_framework.pipeline.forward.verify_exception \ class NetWork_1(Cell): """ NetWork_1 definition """ + def __init__(self): super(NetWork_1, self).__init__() self.addN = P.AddN() @@ -48,6 +49,7 @@ class NetWork_1(Cell): class NetWork_2(Cell): """ NetWork_2 definition """ + def __init__(self): super(NetWork_2, self).__init__() self.addN = P.AddN() @@ -69,6 +71,7 @@ class NetWork_2(Cell): class NetWork_3(Cell): """ NetWork_3 definition """ + def __init__(self): super(NetWork_3, self).__init__() self.addN = P.AddN() @@ -100,19 +103,18 @@ test_cases = [ }), ] - test_cases_for_verify_exception = [ ('SliceStartCross', { 'block': (NetWork_3(), {'exception': RuntimeError}), 'desc_inputs': [*(Tensor(np.ones([2, 3, 4], np.int32)), - Tensor(np.zeros([2, 3, 4], np.int32)), - Tensor(np.ones([2, 3, 4], np.int32)))], + Tensor(np.zeros([2, 3, 4], np.int32)), + Tensor(np.ones([2, 3, 4], np.int32)))], }), ('SliceStepZero', { 'block': (NetWork_3(), {'exception': RuntimeError}), 'desc_inputs': [*(Tensor(np.ones([2, 3, 4], np.int32)), - Tensor(np.zeros([2, 3, 4], np.int32)), - Tensor(np.ones([2, 3, 4], np.int32)))], + Tensor(np.zeros([2, 3, 4], np.int32)), + Tensor(np.ones([2, 3, 4], np.int32)))], }), ] diff --git a/tests/ut/python/parallel/conftest.py b/tests/ut/python/parallel/conftest.py index fb333fb919..75d16a5ca0 100644 --- a/tests/ut/python/parallel/conftest.py +++ b/tests/ut/python/parallel/conftest.py @@ -18,6 +18,7 @@ from mindspore.parallel._cost_model_context import reset_cost_model_context from mindspore.parallel.algo_parameter_config import reset_algo_parameters from mindspore.parallel._utils import _reset_op_id + @pytest.fixture(scope="function", autouse="True") def reset_test_context(): context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/parallel_end_to_end/add_relu/_test_add_relu_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/add_relu/_test_add_relu_parallel_4p.py index
8a24a2190c..0feaff24d1 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/add_relu/_test_add_relu_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/add_relu/_test_add_relu_parallel_4p.py @@ -23,10 +23,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -35,9 +36,11 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - + + class AddRelu(Cell): def __init__(self, strategy0=None, strategy1=None): super(AddRelu, self).__init__() @@ -64,23 +67,25 @@ class AddReluFactory: size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) self.input_np2 = 1.0 - self.output_grad_np = np.reshape((np.arange(0, size)%(number_range-10) - number_range/2)*0.1, input_shape).astype(np.float32) + self.output_grad_np = np.reshape((np.arange(0, size) % (number_range - 10) - number_range / 2) * 0.1, + input_shape).astype(np.float32) self.strategy0 = strategy0 self.strategy1 = strategy1 need_dev_num = 1 need_dev_num_ = 1 for s in strategy0[1]: - need_dev_num = need_dev_num*s + need_dev_num = need_dev_num * s for s in strategy1[1]: - need_dev_num_ = need_dev_num_*s - self.x_id = device_id%need_dev_num - self.y_id = device_id%need_dev_num - self.out_id = device_id%need_dev_num_ + need_dev_num_ = need_dev_num_ * s + self.x_id = device_id % need_dev_num + self.y_id = device_id % need_dev_num + self.out_id = device_id % need_dev_num_ def forward_mindspore_impl(self): net = AddRelu() @@ -97,10 +102,10 @@ class AddReluFactory: y = Tensor(self.input_np2, ms.float32) inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - y1 = Tensor(self.input_np2, ms.float32) + y1 = Tensor(self.input_np2, ms.float32) out = net(x, y, parallel_inputs_compile=[x, y], parallel_inputs_run=[x1, y1]) return out.asnumpy() - + def grad_mindspore_impl(self): output_grad = Tensor(self.output_grad_np) x = Tensor(self.input_np1) @@ -113,7 +118,7 @@ class AddReluFactory: def grad_mindspore_parallel_impl(self): output_grads = self.get_parallel_blocks(self.output_grad_np, self.strategy1[1]) - output_grad = Tensor(output_grads[self.out_id]) + output_grad = Tensor(output_grads[self.out_id]) x = Tensor(self.input_np1) y = Tensor(self.input_np2, ms.float32) net = AddRelu(strategy0=self.strategy0, strategy1=self.strategy1) @@ -124,21 +129,22 @@ class AddReluFactory: inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) y1 = Tensor(self.input_np2, ms.float32) - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], parallel_inputs_run=[x1, y1, output_grad]) + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], + parallel_inputs_run=[x1, y1, output_grad]) return input_grad - + def get_parallel_blocks(self, input_, strategy): blocks = 
[input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def forward_cmp(self): out_mindspore = self.forward_mindspore_impl() out_mindspore_parallel = self.forward_mindspore_parallel_impl() @@ -153,17 +159,19 @@ class AddReluFactory: input_grad_mindspore_parallel0 = input_grad_mindspore_parallel[0].asnumpy() input_grad_mindspore_parallel1 = input_grad_mindspore_parallel[1].asnumpy() assert np.allclose(input_grad_mindspore1, input_grad_mindspore_parallel1, 0.0001, 0.0001) - + + @pytest.mark.reid_forward def test_reid_add_relu_input_256_64(): - stra0 = (0,(2,2),()) - stra1 = (0,(2,2)) + stra0 = (0, (2, 2), ()) + stra1 = (0, (2, 2)) fact = AddReluFactory(input_shape=(256, 64), strategy0=stra0, strategy1=stra1) fact.forward_cmp() + @pytest.mark.reid_grad def test_reid_grad_add_relu_input_256_64(): - stra0 = (0,(2,2),()) - stra1 = (0,(2,2)) + stra0 = (0, (2, 2), ()) + stra1 = (0, (2, 2)) fact = AddReluFactory(input_shape=(256, 64), strategy0=stra0, strategy1=stra1) fact.grad_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/batch_parallel/_test_conv2d_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/batch_parallel/_test_conv2d_parallel_4p.py index 5d22db972e..e6f196b3a5 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/batch_parallel/_test_conv2d_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/batch_parallel/_test_conv2d_parallel_4p.py @@ -25,10 +25,11 @@ from mindspore._checkparam import check_bool, twice from mindspore.common.initializer import initializer from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -37,6 +38,7 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") @@ -45,6 +47,7 @@ class _Conv(Cell): r"""Applies a N-D convolution over an input signal composed of several input planes. 
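This test-local copy mainly validates the in_channels, kernel_size, and group arguments used by the Conv2d wrapper defined below.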
""" + def __init__(self, in_channels, out_channels, @@ -69,12 +72,12 @@ class _Conv(Cell): self.has_bias = has_bias if not (isinstance(in_channels, int) and in_channels > 0): raise ValueError('Attr \'in_channels\' of \'Conv2D\' Op passed ' - +str(in_channels)+ ', should be a int and greater than 0.') + + str(in_channels) + ', should be a int and greater than 0.') if (not isinstance(kernel_size, tuple)) or len(kernel_size) != 2 or \ - (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \ + (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \ kernel_size[0] < 1 or kernel_size[1] < 1: raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed ' - +str(self.kernel_size)+', should be a int or tuple and equal to or greater than 1.') + + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') if in_channels % group != 0: raise ValueError('Attr \'in_channels\' of \'Conv2D\' Op must be divisible by ' 'attr \'group\' of \'Conv2D\' Op.') @@ -141,9 +144,9 @@ class Conv2d(_Conv): if self.has_bias: return self.bias_add(self.conv2d(x, self.weight), self.bias) - return self.conv2d(x, self.weight) - - + return self.conv2d(x, self.weight) + + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -162,49 +165,52 @@ class Conv2dFactory: self.padding = padding self.dilation = dilation self.group = group - self.strategy0 = (0,(4,1,1,1),(1,1,1,1)) + self.strategy0 = (0, (4, 1, 1, 1), (1, 1, 1, 1)) prefix = "" input_size = 1 filter_size = 1 for s in input_shape: prefix = prefix + str(s) + "_" - input_size = input_size*s + input_size = input_size * s self.prefix = prefix for s in filter_shape: - filter_size = filter_size*s + filter_size = filter_size * s number_range1 = min(10, input_size) - number_range2 = min(10, filter_size) - self.input_np1 = np.reshape(np.arange(0, input_size)%number_range1 - number_range1/2, input_shape).astype(np.float16) - self.input_np2 = np.reshape(np.arange(0, input_size)%number_range1 - number_range1/4, input_shape).astype(np.float16) - self.weight_np = np.reshape(np.arange(0, filter_size)%number_range2 - number_range2/2, filter_shape).astype(np.float16) + number_range2 = min(10, filter_size) + self.input_np1 = np.reshape(np.arange(0, input_size) % number_range1 - number_range1 / 2, input_shape).astype( + np.float16) + self.input_np2 = np.reshape(np.arange(0, input_size) % number_range1 - number_range1 / 4, input_shape).astype( + np.float16) + self.weight_np = np.reshape(np.arange(0, filter_size) % number_range2 - number_range2 / 2, filter_shape).astype( + np.float16) self.has_bias = has_bias if self.has_bias is True: self.bias_np = np.arange(0, self.out_c).astype(np.float16) - self.out_shape = (128,64,56,56) + self.out_shape = (128, 64, 56, 56) out_size = 1 for s in self.out_shape: - out_size = out_size*s - number_range3 = min(10, out_size) - self.output_grad_np = np.reshape(np.arange(0, out_size)%number_range3 - number_range3/2, self.out_shape).astype(np.float16) - self.x_id = device_id%4 - self.y_id = device_id%4 + out_size = out_size * s + number_range3 = min(10, out_size) + self.output_grad_np = np.reshape(np.arange(0, out_size) % number_range3 - number_range3 / 2, + self.out_shape).astype(np.float16) + self.x_id = device_id % 4 + self.y_id = device_id % 4 self.out_strategy = self.strategy0[1] - self.out_id = device_id%4 - + self.out_id = device_id % 4 + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - 
while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks - - + i += 1 + return blocks + def forward_conv2d_mindspore_impl(self): input1 = Tensor(self.input_np1) input2 = Tensor(self.input_np2) @@ -225,7 +231,7 @@ class Conv2dFactory: group=self.group, has_bias=False, weight_init=weight) out = net(input1, input2) return out.asnumpy() - + def forward_conv2d_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -247,7 +253,8 @@ class Conv2dFactory: kernel_size=(self.kernel_h, self.kernel_w), stride=self.stride, pad_mode=self.pad_mode, padding=self.padding, dilation=self.dilation, - group=self.group, has_bias=False, weight_init=weight, strategy=(self.strategy0[0], self.strategy0[1], self.strategy0[1])) + group=self.group, has_bias=False, weight_init=weight, + strategy=(self.strategy0[0], self.strategy0[1], self.strategy0[1])) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() out = net(x, y, parallel_inputs_compile=[x, y], parallel_inputs_run=[x1, y1]) @@ -265,7 +272,7 @@ class Conv2dFactory: stride=self.stride, pad_mode=self.pad_mode, padding=self.padding, dilation=self.dilation, group=self.group, has_bias=True, weight_init=weight, - bias_init=bias,) + bias_init=bias, ) else: net = Conv2d(in_channels=self.in_c, out_channels=self.out_c, kernel_size=(self.kernel_h, self.kernel_w), @@ -273,12 +280,11 @@ class Conv2dFactory: padding=self.padding, dilation=self.dilation, group=self.group, has_bias=False, weight_init=weight) - grad_net = Grad(net) grad_net.set_train() out_grad = grad_net(x, y, output_grad) return out_grad - + def grad_conv2d_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -303,16 +309,16 @@ class Conv2dFactory: kernel_size=(self.kernel_h, self.kernel_w), stride=self.stride, pad_mode=self.pad_mode, padding=self.padding, dilation=self.dilation, - group=self.group, has_bias=False, weight_init=weight, strategy=(self.strategy0[0], self.strategy0[1], self.strategy0[1])) - + group=self.group, has_bias=False, weight_init=weight, + strategy=(self.strategy0[0], self.strategy0[1], self.strategy0[1])) grad_net = Grad(net) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_train() grad_net.set_auto_parallel() - out_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], parallel_inputs_run=[x1, y1, output_grad1]) + out_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], + parallel_inputs_run=[x1, y1, output_grad1]) return out_grad - def forward_conv2d_cmp(self): out_mindspore = self.forward_conv2d_mindspore_impl() @@ -332,19 +338,18 @@ class Conv2dFactory: assert allclose(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.001, 0.001) assert allclose(input_grad_blocks_1[self.x_id], input_grad_mindspore_parallel1, 0.001, 0.001) + def test_reid_conv2d_input_128_64_112_112_kernel_64_64_1_1_stride_2_padding_0_bias_true(): - fact = Conv2dFactory(input_shape=(128,64,112,112), - filter_shape=(64,64,1,1), - stride=2, pad_mode='valid', padding=0, - dilation=1, group=1, has_bias=False) + fact = Conv2dFactory(input_shape=(128, 64, 112, 112), + filter_shape=(64, 64, 1, 1), + stride=2, pad_mode='valid', padding=0, + dilation=1, group=1, has_bias=False) fact.forward_conv2d_cmp() def test_reid_conv2d_grad_input_128_64_112_112_kernel_64_64_1_1_stride_2_padding_0_bias_true(): - fact = 
Conv2dFactory(input_shape=(128,64,112,112), - filter_shape=(64,64,1,1), - stride=2, pad_mode='valid', padding=0, - dilation=1, group=1, has_bias=False) + fact = Conv2dFactory(input_shape=(128, 64, 112, 112), + filter_shape=(64, 64, 1, 1), + stride=2, pad_mode='valid', padding=0, + dilation=1, group=1, has_bias=False) fact.grad_conv2d_cmp() - - diff --git a/tests/ut/python/parallel/parallel_end_to_end/dropout/_test_dropout_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/dropout/_test_dropout_parallel_4p.py index c225776a66..0e1c7e2dce 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/dropout/_test_dropout_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/dropout/_test_dropout_parallel_4p.py @@ -21,10 +21,11 @@ from mindspore import context from mindspore.nn import Dropout from mindspore.common.tensor import Tensor -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -33,6 +34,7 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") @@ -44,51 +46,52 @@ class Net(Cell): def construct(self, input): x = self.drop(input) - return x - + return x + + class DropoutFactory: def __init__(self, input_shape, keep_prob, seed0, seed1, strategy0=None): size = 1 prefix = "" for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(10, size) - self.input_np = np.reshape(np.arange(0, size)%number_range, input_shape).astype(np.float32) + self.input_np = np.reshape(np.arange(0, size) % number_range, input_shape).astype(np.float32) self.keep_prob = keep_prob self.seed0 = seed0 self.seed1 = seed1 self.strategy0 = strategy0 need_dev_num = 1 for s in strategy0[1]: - need_dev_num = need_dev_num*s - self.x_id = device_id%need_dev_num - self.out_id = device_id%need_dev_num - + need_dev_num = need_dev_num * s + self.x_id = device_id % need_dev_num + self.out_id = device_id % need_dev_num + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks - + i += 1 + return blocks + + def d4_tensor_compare(self, input, out_me): - [a,b,c,d] = input.shape + [a, b, c, d] = input.shape for i in range(a): for j in range(b): for k in range(c): for e in range(d): - if out_me[i,j,k,e] == 0: + if out_me[i, j, k, e] == 0: assert True == True else: - assert np.allclose(out_me[i,j,k,e], input[i,j,k,e]*(1/0.4), 0.0001, 0.0001) - + assert np.allclose(out_me[i, j, k, e], input[i, j, k, e] * (1 / 0.4), 0.0001, 0.0001) + def forward_mindspore_parallel_impl(self): x = Tensor(self.input_np) inputs_x = self.get_parallel_blocks(self.input_np, self.strategy0[1]) @@ -98,16 +101,18 @@ class DropoutFactory: net.set_auto_parallel() out = net(x, parallel_inputs_compile=[x], parallel_inputs_run=[x1]) return out.asnumpy() - + def forward_cmp(self): out_mindspore_parallel = self.forward_mindspore_parallel_impl() input_blocks = self.get_parallel_blocks(self.input_np, self.strategy0[1]) self.d4_tensor_compare(input_blocks[self.out_id], out_mindspore_parallel) - + +
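# d4_tensor_compare above encodes the inverted-dropout contract: every element
# of the parallel output is either exactly 0 (dropped) or the input scaled by
# 1 / keep_prob (kept); the 1 / 0.4 in its assert hard-codes keep_prob = 0.4
# from the factories below. A vectorized NumPy sketch of the same check
# (hypothetical helper, not part of this patch):
#
#     import numpy as np
#
#     def check_inverted_dropout(inp, out, keep_prob=0.4):
#         """Kept elements equal inp / keep_prob; dropped ones are zero."""
#         kept = out != 0
#         assert np.allclose(out[kept], inp[kept] / keep_prob, 0.0001, 0.0001)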
def test_reid_dropout_forward_seed_F32_64_512_8_8(): - fact = DropoutFactory(input_shape=(64,512,8,8), keep_prob = 0.4, seed0 = 0, seed1 = 0, strategy0=(0,(4,1,1,1))) + fact = DropoutFactory(input_shape=(64, 512, 8, 8), keep_prob=0.4, seed0=0, seed1=0, strategy0=(0, (4, 1, 1, 1))) fact.forward_cmp() + def test_reid_dropout_forward_seed_F32_64_512_8_8_repeat(): - fact = DropoutFactory(input_shape=(64,512,8,8), keep_prob = 0.4, seed0 = 0, seed1 = 0, strategy0=(0,(2,1,1,1))) - fact.forward_cmp() \ No newline at end of file + fact = DropoutFactory(input_shape=(64, 512, 8, 8), keep_prob=0.4, seed0=0, seed1=0, strategy0=(0, (2, 1, 1, 1))) + fact.forward_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allgather_4p.py b/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allgather_4p.py index 6f14d068cd..b0e4aba2ff 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allgather_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allgather_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -33,22 +34,25 @@ def setup_module(): distributedTool.init() print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - + + class MatmulSingle(Cell): def __init__(self, transpose_a=False, transpose_b=False): super(MatmulSingle, self).__init__() self.matmul = P.MatMul(transpose_a, transpose_b) self.pow = P.Pow() self.reduce_sum = P.ReduceSum() + def construct(self, x, y): out = self.matmul(x, y) - out = self.pow(out,2.0) + out = self.pow(out, 2.0) out = self.reduce_sum(out, None) return out - - + + class MatmulAllgather(Cell): def __init__(self, group, transpose_a=False, transpose_b=False): super(MatmulAllgather, self).__init__() @@ -57,14 +61,16 @@ class MatmulAllgather(Cell): self.pow = P.Pow() self.reduce_sum = P.ReduceSum() self.allreduce = P.AllReduce(group=group) + def construct(self, x, y): x = self.allgather(x) out = self.matmul(x, y) - out = self.pow(out,2.0) + out = self.pow(out, 2.0) out = self.reduce_sum(out, None) out = self.allreduce(out) return out + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -72,76 +78,76 @@ class Grad(Cell): def construct(self, x, y, sens): return grad_all_with_sens(self.network)(x, y, sens) - + + class MatmulAllgatherFactory: def __init__(self, inputx_shape, inputy_shape, x_stra, y_stra): - self.inputx=self.GenValue(inputx_shape, 10) - self.inputy=self.GenValue(inputy_shape, 20) + self.inputx = self.GenValue(inputx_shape, 10) + self.inputy = self.GenValue(inputy_shape, 20) self.x_stra = x_stra self.y_stra = y_stra - stra_size= 1 + stra_size = 1 for s in x_stra: - stra_size = stra_size*s + stra_size = stra_size * s self.stra_size = stra_size - + def GenValue(self, input_shape, delta): size = 1 for s in input_shape: - size = size*s + size = size * s number_range = min(100, size) - input_np = np.reshape(np.arange(0, size)%number_range - delta, input_shape).astype(np.float32) + input_np = np.reshape(np.arange(0, size) % number_range - delta, input_shape).astype(np.float32) return input_np - + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - 
i+=1 - return blocks - + i += 1 + return blocks + def grad_mindspore_impl_single(self): - x=Tensor(self.inputx) - y=Tensor(self.inputy) - sens=Tensor(1.0, dtype=ms.float32) + x = Tensor(self.inputx) + y = Tensor(self.inputy) + sens = Tensor(1.0, dtype=ms.float32) net = MatmulSingle() grad_net = Grad(net) - grad_net.set_train() + grad_net.set_train() input_grad = grad_net(x, y, sens) return input_grad - + def grad_mindspore_impl_reduce(self): inputxs = self.get_parallel_blocks(self.inputx, self.x_stra) inputys = self.get_parallel_blocks(self.inputy, self.y_stra) - x = Tensor(inputxs[device_id%self.stra_size]) - y = Tensor(inputys[device_id%self.stra_size]) - repeat_num = device_num/self.stra_size - v = self.stra_size*repeat_num*repeat_num*repeat_num - sens = Tensor(1.0/v, dtype=ms.float32) + x = Tensor(inputxs[device_id % self.stra_size]) + y = Tensor(inputys[device_id % self.stra_size]) + repeat_num = device_num / self.stra_size + v = self.stra_size * repeat_num * repeat_num * repeat_num + sens = Tensor(1.0 / v, dtype=ms.float32) net = MatmulAllgather("hccl_world_group") grad_net = Grad(net) - grad_net.set_train() + grad_net.set_train() input_grad = grad_net(x, y, sens) return input_grad - + def grad_cmp(self): single_results = self.grad_mindspore_impl_single() reduce_results = self.grad_mindspore_impl_reduce() - single_result0 = self.get_parallel_blocks(single_results[0].asnumpy(), self.x_stra)[device_id%self.stra_size] + single_result0 = self.get_parallel_blocks(single_results[0].asnumpy(), self.x_stra)[device_id % self.stra_size] reduce_result0 = reduce_results[0].asnumpy() - single_result1 = self.get_parallel_blocks(single_results[1].asnumpy(), self.y_stra)[device_id%self.stra_size] + single_result1 = self.get_parallel_blocks(single_results[1].asnumpy(), self.y_stra)[device_id % self.stra_size] reduce_result1 = reduce_results[1].asnumpy() assert np.allclose(single_result0, reduce_result0, 0.0001, 0.0001) assert np.allclose(single_result1, reduce_result1, 0.0001, 0.0001) - + + def test_reduce_grad(): - inputx_shape = (64,32) - inputy_shape = (32,64) - fact = MatmulAllgatherFactory(inputx_shape, inputy_shape, (4,1), (1,4)) + inputx_shape = (64, 32) + inputy_shape = (32, 64) + fact = MatmulAllgatherFactory(inputx_shape, inputy_shape, (4, 1), (1, 4)) fact.grad_cmp() - - \ No newline at end of file diff --git a/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py b/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py index ace9c2d642..8b6d9b138e 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -33,9 +34,11 @@ def setup_module(): distributedTool.init() print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - + + class MatmulSingle(Cell): def __init__(self, transpose_a=False, transpose_b=False): super(MatmulSingle, self).__init__() @@ -43,13 +46,15 @@ class MatmulSingle(Cell): self.matmul2 = P.MatMul(transpose_a, transpose_b) self.pow = P.Pow() self.reduce_sum = P.ReduceSum() + def construct(self, x, y, z): out = 
self.matmul1(x, y) out = self.matmul2(out, z) - out = self.pow(out,2.0) + out = self.pow(out, 2.0) out = self.reduce_sum(out, None) return out - + + class MatmulReduce(Cell): def __init__(self, group, transpose_a=False, transpose_b=False): super(MatmulReduce, self).__init__() @@ -59,15 +64,17 @@ class MatmulReduce(Cell): self.pow = P.Pow() self.reduce_sum = P.ReduceSum() self.allreduce2 = P.AllReduce(group=group) + def construct(self, x, y, z): out = self.matmul1(x, y) out = self.allreduce1(out) out = self.matmul2(out, z) - out = self.pow(out,2.0) + out = self.pow(out, 2.0) out = self.reduce_sum(out, None) out = self.allreduce2(out) return out + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -75,92 +82,93 @@ class Grad(Cell): def construct(self, x, y, z, sens): return grad_all_with_sens(self.network)(x, y, z, sens) - + + class MatmulReduceFactory: def __init__(self, inputx_shape, inputy_shape, inputz_shape, x_stra, y_stra, z_stra): - self.inputx=self.GenValue(inputx_shape, 10) - self.inputy=self.GenValue(inputy_shape, 20) - self.inputz=self.GenValue(inputz_shape, 30) + self.inputx = self.GenValue(inputx_shape, 10) + self.inputy = self.GenValue(inputy_shape, 20) + self.inputz = self.GenValue(inputz_shape, 30) self.x_stra = x_stra self.y_stra = y_stra self.z_stra = z_stra - stra_size= 1 + stra_size = 1 for s in x_stra: - stra_size = stra_size*s + stra_size = stra_size * s self.stra_size = stra_size - + def GenValue(self, input_shape, delta): size = 1 for s in input_shape: - size = size*s + size = size * s number_range = min(100, size) - input_np = np.reshape(np.arange(0, size)%number_range - delta, input_shape).astype(np.float32) + input_np = np.reshape(np.arange(0, size) % number_range - delta, input_shape).astype(np.float32) return input_np - + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks - + i += 1 + return blocks + def grad_mindspore_impl_single(self): - x=Tensor(self.inputx) - y=Tensor(self.inputy) - z=Tensor(self.inputz) - sens=Tensor(1.0, dtype=ms.float32) + x = Tensor(self.inputx) + y = Tensor(self.inputy) + z = Tensor(self.inputz) + sens = Tensor(1.0, dtype=ms.float32) net = MatmulSingle() grad_net = Grad(net) - grad_net.set_train() + grad_net.set_train() input_grad = grad_net(x, y, z, sens) return input_grad - + def grad_mindspore_impl_reduce(self): inputxs = self.get_parallel_blocks(self.inputx, self.x_stra) inputys = self.get_parallel_blocks(self.inputy, self.y_stra) inputzs = self.get_parallel_blocks(self.inputz, self.z_stra) - x = Tensor(inputxs[device_id%self.stra_size]) - y = Tensor(inputys[device_id%self.stra_size]) - z = Tensor(inputzs[device_id%self.stra_size]) - repeat_num = device_num/self.stra_size - v = self.stra_size*repeat_num*repeat_num*repeat_num - sens = Tensor(1.0/v, dtype=ms.float32) + x = Tensor(inputxs[device_id % self.stra_size]) + y = Tensor(inputys[device_id % self.stra_size]) + z = Tensor(inputzs[device_id % self.stra_size]) + repeat_num = device_num / self.stra_size + v = self.stra_size * repeat_num * repeat_num * repeat_num + sens = Tensor(1.0 / v, dtype=ms.float32) net = MatmulReduce("hccl_world_group") grad_net = Grad(net) - grad_net.set_train() + grad_net.set_train() input_grad = grad_net(x, y, z, sens) return input_grad - + def grad_cmp(self): single_results = self.grad_mindspore_impl_single() 
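# The comparison below slices the single-device gradients with
# get_parallel_blocks and checks the block owned by this device
# (device_id % self.stra_size) against the gradients from the parallel run.
# The sens value 1.0 / v used above, with v = stra_size * repeat_num ** 3,
# rescales the duplicated, AllReduce-accumulated loss so that both paths
# should agree within the 1e-4 tolerance passed to np.allclose.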
reduce_results = self.grad_mindspore_impl_reduce() - single_result0 = self.get_parallel_blocks(single_results[0].asnumpy(), self.x_stra)[device_id%self.stra_size] + single_result0 = self.get_parallel_blocks(single_results[0].asnumpy(), self.x_stra)[device_id % self.stra_size] reduce_result0 = reduce_results[0].asnumpy() - single_result1 = self.get_parallel_blocks(single_results[1].asnumpy(), self.y_stra)[device_id%self.stra_size] + single_result1 = self.get_parallel_blocks(single_results[1].asnumpy(), self.y_stra)[device_id % self.stra_size] reduce_result1 = reduce_results[1].asnumpy() - single_result2 = self.get_parallel_blocks(single_results[2].asnumpy(), self.z_stra)[device_id%self.stra_size] + single_result2 = self.get_parallel_blocks(single_results[2].asnumpy(), self.z_stra)[device_id % self.stra_size] reduce_result2 = reduce_results[2].asnumpy() assert np.allclose(single_result0, reduce_result0, 0.0001, 0.0001) assert np.allclose(single_result1, reduce_result1, 0.0001, 0.0001) assert np.allclose(single_result2, reduce_result2, 0.0001, 0.0001) - + + def test_reduce_grad(): - inputx_shape = (32,64) - inputy_shape = (64,64) - inputz_shape = (64,32) - fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1,4), (4,1), (1,4)) + inputx_shape = (32, 64) + inputy_shape = (64, 64) + inputz_shape = (64, 32) + fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1, 4), (4, 1), (1, 4)) fact.grad_cmp() - + + def test_reduce_grad_repeat(): - inputx_shape = (32,64) - inputy_shape = (64,64) - inputz_shape = (64,32) - fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1,2), (2,1), (1,2)) + inputx_shape = (32, 64) + inputy_shape = (64, 64) + inputz_shape = (64, 32) + fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1, 2), (2, 1), (1, 2)) fact.grad_cmp() - - \ No newline at end of file diff --git a/tests/ut/python/parallel/parallel_end_to_end/l2normalize/_test_l2normalize_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/l2normalize/_test_l2normalize_parallel_4p.py index 2c5c91a3cc..00f924ab00 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/l2normalize/_test_l2normalize_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/l2normalize/_test_l2normalize_parallel_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -34,9 +35,11 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") + class L2normalize(Cell): def __init__(self, axis=0, epsilon=1e-4, strategy0=None, strategy1=None): super(L2normalize, self).__init__() @@ -48,6 +51,7 @@ class L2normalize(Cell): out = self.l2norm(out) return out + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -56,24 +60,28 @@ class Grad(Cell): def construct(self, x, y, output_grad): return grad_all_with_sens(self.network)(x, y, output_grad) + class L2normalizeFactory: def __init__(self, input_shape, axis, strategy0, strategy1): prefix = "" size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - 
self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) - self.input_np2 = np.reshape(np.arange(0, size)%number_range - number_range/4, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) + self.input_np2 = np.reshape(np.arange(0, size) % number_range - number_range / 4, input_shape).astype( + np.float32) target_shape = input_shape self.target_shape = target_shape target_size = 1 for s in target_shape: - target_size = target_size*s - number_range = min(1000, target_size) - self.output_grad_np = np.reshape(np.arange(0, target_size)%number_range - number_range/2, target_shape).astype(np.float32) + target_size = target_size * s + number_range = min(1000, target_size) + self.output_grad_np = np.reshape(np.arange(0, target_size) % number_range - number_range / 2, + target_shape).astype(np.float32) self.axis = axis self.epsilon = 1e-4 self.strategy0 = strategy0 @@ -83,39 +91,39 @@ class L2normalizeFactory: need_dev_num0 = 1 need_dev_num1 = 1 for s in strategy0[1]: - need_dev_num0 = need_dev_num0*s + need_dev_num0 = need_dev_num0 * s for s in out_strategy: - need_dev_num1 = need_dev_num1*s - self.x_id = device_id%need_dev_num0 - self.y_id = device_id%need_dev_num0 - self.out_id = device_id%need_dev_num1 - + need_dev_num1 = need_dev_num1 * s + self.x_id = device_id % need_dev_num0 + self.y_id = device_id % need_dev_num0 + self.out_id = device_id % need_dev_num1 + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks - + i += 1 + return blocks + def forward_mindspore_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) net = L2normalize(self.axis, self.epsilon) out = net(x, y) return out.asnumpy() - + def forward_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) inputs_y = self.get_parallel_blocks(self.input_np2, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - y1 = Tensor(inputs_y[self.y_id]) + y1 = Tensor(inputs_y[self.y_id]) net = L2normalize(self.axis, self.epsilon, strategy0=self.strategy0, strategy1=self.strategy1) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() @@ -131,7 +139,7 @@ class L2normalizeFactory: grad_net.set_train() input_grad = grad_net(x, y, output_grad) return input_grad - + def grad_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -147,9 +155,9 @@ class L2normalizeFactory: context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_auto_parallel() grad_net.set_train() - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], parallel_inputs_run=[x1, y1, output_grad1]) - return input_grad - + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], + parallel_inputs_run=[x1, y1, output_grad1]) + return input_grad def forward_cmp(self): out_mindspore = self.forward_mindspore_impl() @@ -169,27 +177,30 @@ class L2normalizeFactory: assert np.allclose(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert np.allclose(input_grad_blocks_1[self.y_id], input_grad_mindspore_parallel1, 0.0001, 0.0001) + def 
test_reid_l2normalize_input_128_512(): - input_shape = (128,512) + input_shape = (128, 512) axis = 0 - fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (4,1), (4,1)), strategy1=(0, (1,4))) + fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (4, 1), (4, 1)), strategy1=(0, (1, 4))) fact.forward_cmp() + def test_reid_l2normalize_grad_input_128_512(): - input_shape = (128,512) + input_shape = (128, 512) axis = 0 - fact = L2normalizeFactory(input_shape, axis, (0, (4,1), (4,1)), strategy1=(0, (1,4))) + fact = L2normalizeFactory(input_shape, axis, (0, (4, 1), (4, 1)), strategy1=(0, (1, 4))) fact.grad_cmp() - + + def test_reid_l2normalize_input_128_512_repeat(): - input_shape = (128,512) + input_shape = (128, 512) axis = 0 - fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (1,2), (1,2)), strategy1=(0, (1,2))) + fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (1, 2), (1, 2)), strategy1=(0, (1, 2))) fact.forward_cmp() + def test_reid_l2normalize_grad_input_128_512_repeat(): - input_shape = (128,512) + input_shape = (128, 512) axis = 0 - fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (1,2), (1,2)), strategy1=(0, (1,2))) + fact = L2normalizeFactory(input_shape, axis, strategy0=(0, (1, 2), (1, 2)), strategy1=(0, (1, 2))) fact.grad_cmp() - diff --git a/tests/ut/python/parallel/parallel_end_to_end/loss/_test_loss_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/loss/_test_loss_parallel_4p.py index 8701d2ae10..dd2c16d79f 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/loss/_test_loss_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/loss/_test_loss_parallel_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -34,9 +35,11 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") + class AddRelu(Cell): def __init__(self, strategy0=None, strategy1=None): super(AddRelu, self).__init__() @@ -48,6 +51,7 @@ class AddRelu(Cell): out = self.relu(out) return out + class NetWithLoss(Cell): def __init__(self, network, strategy2=None): super(NetWithLoss, self).__init__() @@ -67,24 +71,28 @@ class Grad(Cell): def construct(self, x, y, b): return grad_all(self.network)(x, y, b) + class AddReluFactory: def __init__(self, input_shape, strategy0, strategy1, strategy2): prefix = "" size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) - self.input_np2 = np.reshape(np.arange(0, size)%number_range - number_range/4, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) + self.input_np2 = np.reshape(np.arange(0, size) % number_range - number_range / 4, input_shape).astype( + np.float32) target_shape = input_shape self.target_shape = target_shape target_size = 1 for s in target_shape: - target_size = target_size*s - number_range = min(10, target_size) - self.output_grad_np = np.reshape((np.arange(0, 
target_size)%number_range)*0.1, target_shape).astype(np.float32) + target_size = target_size * s + number_range = min(10, target_size) + self.output_grad_np = np.reshape((np.arange(0, target_size) % number_range) * 0.1, target_shape).astype( + np.float32) self.strategy0 = strategy0 self.strategy1 = strategy1 self.strategy2 = strategy2 @@ -93,24 +101,24 @@ class AddReluFactory: need_dev_num0 = 1 need_dev_num1 = 1 for s in strategy0[1]: - need_dev_num0 = need_dev_num0*s + need_dev_num0 = need_dev_num0 * s for s in out_strategy: - need_dev_num1 = need_dev_num1*s - self.x_id = device_id%need_dev_num0 - self.y_id = device_id%need_dev_num0 - self.out_id = device_id%need_dev_num1 - + need_dev_num1 = need_dev_num1 * s + self.x_id = device_id % need_dev_num0 + self.y_id = device_id % need_dev_num0 + self.out_id = device_id % need_dev_num1 + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks + i += 1 + return blocks def grad_mindspore_impl(self): x = Tensor(self.input_np1) @@ -119,13 +127,13 @@ class AddReluFactory: net = AddRelu() net_with_loss = NetWithLoss(net) grad_net = Grad(net_with_loss) - grad_net.set_train() + grad_net.set_train() input_grads = [] for i in range(0, 3): input_grad = grad_net(x, y, output_grad) input_grads.append(input_grad) return input_grads - + def grad_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -144,15 +152,15 @@ class AddReluFactory: grad_net.set_train() input_grads = [] for i in range(0, 3): - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], parallel_inputs_run=[x1, y1, output_grad1]) + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], + parallel_inputs_run=[x1, y1, output_grad1]) input_grads.append(input_grad) - return input_grads - + return input_grads def grad_cmp(self): input_grad_mindspores = self.grad_mindspore_impl() input_grad_mindspore_parallels = self.grad_mindspore_parallel_impl() - for i in range(0,len(input_grad_mindspores)): + for i in range(0, len(input_grad_mindspores)): input_grad_mindspore = input_grad_mindspores[i] input_grad_mindspore_parallel = input_grad_mindspore_parallels[i] input_grad_mindspore0 = input_grad_mindspore[0].asnumpy() @@ -161,21 +169,27 @@ class AddReluFactory: input_grad_mindspore_parallel1 = input_grad_mindspore_parallel[1].asnumpy() input_grad_blocks_0 = self.get_parallel_blocks(input_grad_mindspore0, self.strategy0[1]) input_grad_blocks_1 = self.get_parallel_blocks(input_grad_mindspore1, self.strategy0[2]) - np.save(path+str(i)+"_"+str(device_id)+"_"+self.prefix+"_grad_single0.npy", input_grad_blocks_0[self.x_id]) - np.save(path+str(i)+"_"+str(device_id)+"_"+self.prefix+"_grad_single1.npy", input_grad_blocks_1[self.y_id]) - np.save(path+str(i)+"_"+str(device_id)+"_"+self.prefix+"_grad_parallel0.npy", input_grad_mindspore_parallel0) - np.save(path+str(i)+"_"+str(device_id)+"_"+self.prefix+"_grad_parallel1.npy", input_grad_mindspore_parallel1) + np.save(path + str(i) + "_" + str(device_id) + "_" + self.prefix + "_grad_single0.npy", + input_grad_blocks_0[self.x_id]) + np.save(path + str(i) + "_" + str(device_id) + "_" + self.prefix + "_grad_single1.npy", + input_grad_blocks_1[self.y_id]) + np.save(path + str(i) + "_" + str(device_id) + "_" + self.prefix + "_grad_parallel0.npy", + 
input_grad_mindspore_parallel0) + np.save(path + str(i) + "_" + str(device_id) + "_" + self.prefix + "_grad_parallel1.npy", + input_grad_mindspore_parallel1) assert np.allclose(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert np.allclose(input_grad_blocks_1[self.y_id], input_grad_mindspore_parallel1, 0.0001, 0.0001) - def test_reid_l2normalize_grad_input_128_512(): - input_shape = (128,512) - fact = AddReluFactory(input_shape, strategy0=(0, (4,1), (4,1)), strategy1=(0, (4,1)), strategy2=(0,(4,1),(4,1))) + input_shape = (128, 512) + fact = AddReluFactory(input_shape, strategy0=(0, (4, 1), (4, 1)), strategy1=(0, (4, 1)), + strategy2=(0, (4, 1), (4, 1))) fact.grad_cmp() + def test_reid_l2normalize_grad_input_128_512_stridesplit(): - input_shape = (128,512) - fact = AddReluFactory(input_shape, strategy0=(0, (1,1), (1,1)), strategy1=(0, (4,1)), strategy2=(0,(4,1),(4,1))) - fact.grad_cmp() + input_shape = (128, 512) + fact = AddReluFactory(input_shape, strategy0=(0, (1, 1), (1, 1)), strategy1=(0, (4, 1)), + strategy2=(0, (4, 1), (4, 1))) + fact.grad_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/matmul/_test_matmul_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/matmul/_test_matmul_parallel_4p.py index e7171c1862..3cdd878ef3 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/matmul/_test_matmul_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/matmul/_test_matmul_parallel_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -33,28 +34,34 @@ def setup_module(): distributedTool.init() distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") - + + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") + class Matmul(Cell): def __init__(self, transpose_a=False, transpose_b=False, strategy0=None, strategy1=None): super(Matmul, self).__init__() self.add = P.TensorAdd(strategy=strategy1) self.matmul = P.MatMul(transpose_a, transpose_b, strategy=strategy0) + def construct(self, x, w, z): out = self.add(x, z) return self.matmul(out, w) + class BatchMatMul(Cell): def __init__(self, transpose_a=False, transpose_b=False, strategy0=None, strategy1=None): super(BatchMatMul, self).__init__() self.add = P.TensorAdd(strategy=strategy1) self.batchmatmul = P.BatchMatMul(transpose_a, transpose_b, strategy=strategy0) + def construct(self, x, w, z): out = self.add(x, z) return self.batchmatmul(out, w) + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -64,6 +71,7 @@ class Grad(Cell): gout = grad_all_with_sens(self.network)(inputa, inputb, inputz, output_grad) return gout + class BatchmatmulFactory: def __init__(self, inputa_shape, inputb_shape, transpose_a, transpose_b, strategy, strategy_): self.strategy = strategy @@ -73,19 +81,21 @@ class BatchmatmulFactory: prefix = "" for s in inputa_shape: prefix = prefix + str(s) + "_" - inputa_size = inputa_size*s + inputa_size = inputa_size * s prefix = prefix + "and" for s in inputb_shape: prefix = prefix + str(s) + "_" - inputb_size = inputb_size*s + inputb_size = inputb_size * s number_rangea = min(1000, inputa_size) number_rangeb = min(1000, inputb_size) - self.inputa = 
np.reshape(np.arange(0, inputa_size)%number_rangea - number_rangea/2, inputa_shape).astype(np.float32) - self.inputb = np.reshape(np.arange(0, inputb_size)%number_rangeb - number_rangeb/2, inputb_shape).astype(np.float32) + self.inputa = np.reshape(np.arange(0, inputa_size) % number_rangea - number_rangea / 2, inputa_shape).astype( + np.float32) + self.inputb = np.reshape(np.arange(0, inputb_size) % number_rangeb - number_rangeb / 2, inputb_shape).astype( + np.float32) self.inputz = np.zeros(self.inputa.shape).astype(np.float32) self.transpose_a = transpose_a self.transpose_b = transpose_b - + out_shape = [] device_matrix = [] out_strategy = [] @@ -97,15 +107,14 @@ class BatchmatmulFactory: temp = inputb_shape[-1] inputb_shape[-1] = inputb_shape[-2] inputb_shape[-2] = temp - + if (len(inputa_shape) >= len(inputb_shape)): out_shape = list(inputa_shape) out_shape[-1] = inputb_shape[-1] else: out_shape = list(inputb_shape) out_shape[-2] = inputa_shape[-2] - - + strategy1 = list(self.strategy[1]) strategy2 = list(self.strategy[2]) if transpose_a: @@ -116,7 +125,7 @@ class BatchmatmulFactory: temp = strategy2[-1] strategy2[-1] = strategy2[-2] strategy2[-2] = temp - + if (len(strategy1) >= len(strategy2)): out_strategy = strategy1.copy() out_strategy[-1] = strategy2[-1] @@ -126,20 +135,21 @@ class BatchmatmulFactory: device_matrix = out_strategy.copy() device_matrix.insert(-1, strategy1[-1]) self.out_strategy = out_strategy - - need_dev_num = 1 + + need_dev_num = 1 for s in device_matrix: - need_dev_num = need_dev_num*s + need_dev_num = need_dev_num * s self.need_dev_num = need_dev_num self.device_matrix = device_matrix - + out_size = 1 for s in out_shape: - out_size = out_size*s + out_size = out_size * s number_range = min(1000, out_size) - self.output_grad_np = np.reshape(np.arange(0, out_size)%number_range - number_range/2, out_shape).astype(np.float32) - - device_index = self.id_to_list(device_id%need_dev_num, self.device_matrix) + self.output_grad_np = np.reshape(np.arange(0, out_size) % number_range - number_range / 2, out_shape).astype( + np.float32) + + device_index = self.id_to_list(device_id % need_dev_num, self.device_matrix) x_index = device_index[:-1].copy() if transpose_a: temp = x_index[-1] @@ -152,58 +162,56 @@ class BatchmatmulFactory: temp = y_index[-1] y_index[-1] = y_index[-2] y_index[-2] = temp - + out_index = device_index[:-2].copy() out_index.append(device_index[-1]) - + print(device_matrix) print(device_index) - + need_dev_num_ = 1 for s in strategy_[1]: - need_dev_num_ = need_dev_num_*s - self.x_id = device_id%need_dev_num_ + need_dev_num_ = need_dev_num_ * s + self.x_id = device_id % need_dev_num_ self.y_id = self.list_to_id(y_index, self.strategy[2]) self.out_id = self.list_to_id(out_index, self.out_strategy) - - - + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + """ shape: the upper bound of each dimension, e.g. (2, 4, 8) """ + def id_to_list(self, id, shape): result = [] r = id for i in range(0, len(shape)): v = 1 - for j in range(i+1, len(shape)): - v = v*shape[j] - result.append(r//v) - r = r%v + for j in range(i + 1, len(shape)): + v = v * shape[j] + result.append(r // v) + r = r % v return result - + def list_to_id(self, id_list, shape): result = 0 for i in range(0, len(id_list)): v = 1 - for j in range(i+1, len(id_list)): - v = v*shape[j] - result = result + id_list[i]*v + for j in range(i + 1, len(id_list)): + v = v * shape[j] + result = result + id_list[i] * v return result
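# id_to_list/list_to_id above are a mixed-radix conversion between a flat
# device id and its coordinate in the device matrix. A commented, standalone
# sketch of the same idea (hypothetical names, not part of this patch):
#
#     def id_to_coords(flat_id, shape):
#         """Decompose flat_id into one coordinate per device-matrix axis."""
#         coords = []
#         for i in range(len(shape)):
#             stride = 1
#             for j in range(i + 1, len(shape)):
#                 stride *= shape[j]  # ids spanned by one step along axis i
#             coords.append(flat_id // stride)
#             flat_id %= stride
#         return coords
#
#     def coords_to_id(coords, shape):
#         """Inverse of id_to_coords: fold coordinates back into a flat id."""
#         flat_id = 0
#         for i in range(len(coords)):
#             stride = 1
#             for j in range(i + 1, len(coords)):
#                 stride *= shape[j]
#             flat_id += coords[i] * stride
#         return flat_id
#
# With shape (2, 4, 8), i.e. the docstring's "upper bound of each dimension",
# id_to_coords(26, (2, 4, 8)) == [0, 3, 2] and
# coords_to_id([0, 3, 2], (2, 4, 8)) == 26.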
- - + def forward_mindspore_impl(self): if len(self.inputa.shape) > 2: matmul = BatchMatMul(self.transpose_a, self.transpose_b) @@ -212,7 +220,7 @@ class BatchmatmulFactory: matmul.set_train() out_me = matmul(Tensor(self.inputa), Tensor(self.inputb), Tensor(self.inputz)) return out_me.asnumpy() - + def forward_mindspore_parallel_impl(self): if len(self.inputa.shape) > 2: matmul = BatchMatMul(self.transpose_a, self.transpose_b, strategy0=self.strategy, strategy1=self.strategy_) @@ -225,14 +233,14 @@ class BatchmatmulFactory: xs = self.get_parallel_blocks(self.inputa, self.strategy_[1]) ys = self.get_parallel_blocks(self.inputb, self.strategy[2]) zs = self.get_parallel_blocks(self.inputz, self.strategy_[1]) - x1 = Tensor(xs[self.x_id]) # - y1 = Tensor(ys[self.y_id]) # to be derived from the device matrix + x1 = Tensor(xs[self.x_id]) # + y1 = Tensor(ys[self.y_id]) # to be derived from the device matrix z1 = Tensor(zs[self.x_id]) matmul.set_train() matmul.set_auto_parallel() out_me = matmul(x, y, z, parallel_inputs_compile=[x, y, z], parallel_inputs_run=[x1, y1, z1]) return out_me.asnumpy() - + def grad_mindspore_impl(self): x = Tensor(self.inputa) y = Tensor(self.inputb) @@ -246,7 +254,7 @@ class BatchmatmulFactory: out_grad_me = Tensor(self.output_grad_np) out_grad = net_me(x, y, z, out_grad_me) return out_grad - + def grad_mindspore_parallel_impl(self): if len(self.inputa.shape) > 2: matmul = BatchMatMul(self.transpose_a, self.transpose_b, strategy0=self.strategy, strategy1=self.strategy_) @@ -256,22 +264,23 @@ class BatchmatmulFactory: y = Tensor(self.inputb) z = Tensor(self.inputz) out_grad_me = Tensor(self.output_grad_np) - + xs = self.get_parallel_blocks(self.inputa, self.strategy_[1]) ys = self.get_parallel_blocks(self.inputb, self.strategy[2]) zs = self.get_parallel_blocks(self.inputz, self.strategy_[1]) out_grads = self.get_parallel_blocks(self.output_grad_np, self.out_strategy) - - x1 = Tensor(xs[self.x_id]) # to be derived from the device matrix - y1 = Tensor(ys[self.y_id]) # + + x1 = Tensor(xs[self.x_id]) # to be derived from the device matrix + y1 = Tensor(ys[self.y_id]) # z1 = Tensor(zs[self.x_id]) out_grad1 = Tensor(out_grads[self.out_id]) net_me = Grad(matmul) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net_me.set_auto_parallel() net_me.set_train() - - out_grad = net_me(x, y, z, out_grad_me, parallel_inputs_compile = [x, y, z, out_grad1], parallel_inputs_run = [x1, y1, z1, out_grad1]) + + out_grad = net_me(x, y, z, out_grad_me, parallel_inputs_compile=[x, y, z, out_grad1], + parallel_inputs_run=[x1, y1, z1, out_grad1]) return out_grad def forward_cmp(self): @@ -289,27 +298,31 @@ class BatchmatmulFactory: assert allclose(input_grad_mindspores0[self.x_id], input_grad_mindspore_parallel[0].asnumpy(), 0.0001, 0.0001) assert allclose(input_grad_mindspores1[self.y_id], input_grad_mindspore_parallel[1].asnumpy(), 0.0001, 0.0001) assert allclose(input_grad_mindspores2[self.x_id], input_grad_mindspore_parallel[2].asnumpy(), 0.0001, 0.0001) - + + def test_reid_batchmatmul_inputa_128_512_inputb_2000_512(): inputa = [128, 512] inputb = [2000, 512] - fact = BatchmatmulFactory(inputa, inputb, False, True, (0,(2,2),(1,2)), (0,(2,2),(2,2))) + fact = BatchmatmulFactory(inputa, inputb, False, True, (0, (2, 2), (1, 2)), (0, (2, 2), (2, 2))) fact.forward_cmp() + def test_reid_batchmatmul_grad_inputa_128_512_inputb_2000_512(): inputa = [128, 512] inputb = [2000, 512] - fact = BatchmatmulFactory(inputa, inputb, False, True, (0, (2,2),(1,2)), (0,(2,2),(2,2))) + fact =
BatchmatmulFactory(inputa, inputb, False, True, (0, (2, 2), (1, 2)), (0, (2, 2), (2, 2))) fact.grad_cmp() + def test_reid_batchmatmul_inputa_128_512_inputb_2000_512_redistribution(): inputa = [128, 512] inputb = [2000, 512] - fact = BatchmatmulFactory(inputa, inputb, False, True, (0,(1,2),(1,2)), (0,(2,2),(2,2))) + fact = BatchmatmulFactory(inputa, inputb, False, True, (0, (1, 2), (1, 2)), (0, (2, 2), (2, 2))) fact.forward_cmp() + def test_reid_batchmatmul_grad_inputa_128_512_inputb_2000_512_redistribution(): inputa = [128, 512] inputb = [2000, 512] - fact = BatchmatmulFactory(inputa, inputb, False, True, (0, (1,2),(1,2)), (0,(2,2),(2,2))) + fact = BatchmatmulFactory(inputa, inputb, False, True, (0, (1, 2), (1, 2)), (0, (2, 2), (2, 2))) fact.grad_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/max/_test_max_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/max/_test_max_parallel_4p.py index f2b1252499..648692226a 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/max/_test_max_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/max/_test_max_parallel_4p.py @@ -22,10 +22,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -34,9 +35,11 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -68,48 +71,50 @@ class MaxFactory: prefix = "" for s in input_shape: prefix = prefix + str(s) + "_" - input_size = input_size*s + input_size = input_size * s number_range = min(1000, input_size) - self.input_np1 = np.reshape(np.arange(0, input_size)%number_range - number_range/2, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, input_size) % number_range - number_range / 2, input_shape).astype( + np.float32) self.input_np2 = self.input_np1.copy() self.out_grad_np = None out_shape = list(input_shape) out_shape.pop(axis) - out_size = input_size/input_shape[axis] + out_size = input_size / input_shape[axis] number_range_ = min(1000, out_size) - self.out_grad_np = np.reshape(np.arange(0, out_size)%number_range_ - number_range_/2, out_shape).astype(np.float32) + self.out_grad_np = np.reshape(np.arange(0, out_size) % number_range_ - number_range_ / 2, out_shape).astype( + np.float32) out_strategy = list(strategy1[1]) out_strategy.pop(axis) self.out_strategy = out_strategy need_dev_num = 1 need_dev_num_ = 1 for s in strategy0[1]: - need_dev_num = need_dev_num*s + need_dev_num = need_dev_num * s for s in out_strategy: - need_dev_num_ = need_dev_num_*s - self.x_id = device_id%need_dev_num - self.y_id = device_id%need_dev_num - self.out_id = device_id%need_dev_num_ - + need_dev_num_ = need_dev_num_ * s + self.x_id = device_id % need_dev_num + self.y_id = device_id % need_dev_num + self.out_id = device_id % need_dev_num_ + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def forward_mindspore_impl(self): input1 
= Tensor(self.input_np1) input2 = Tensor(self.input_np2) net = Max(axis=self.axis, keep_dims=self.keep_dims) out = net(input1, input2) return out.asnumpy() - + def forward_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -122,7 +127,7 @@ class MaxFactory: net.set_auto_parallel() out = net(x, y, parallel_inputs_compile=[x, y], parallel_inputs_run=[x1, y1]) return out.asnumpy() - + def grad_mindspore_impl(self): input1 = Tensor(self.input_np1) input2 = Tensor(self.input_np2) @@ -147,9 +152,10 @@ class MaxFactory: context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_auto_parallel() grad_net.set_train() - input_grad = grad_net(x, y, out_grad, parallel_inputs_compile=[x, y, out_grad], parallel_inputs_run=[x1, y1, out_grad]) + input_grad = grad_net(x, y, out_grad, parallel_inputs_compile=[x, y, out_grad], + parallel_inputs_run=[x1, y1, out_grad]) return input_grad - + def forward_cmp(self): out_mindspore = self.forward_mindspore_impl() out_mindspore_parallel = self.forward_mindspore_parallel_impl() @@ -170,26 +176,38 @@ class MaxFactory: assert np.allclose(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert np.allclose(input_grad_blocks_1[self.y_id], input_grad_mindspore_parallel1, 0.0001, 0.0001) + def test_reid_max_forward_input_256_64(): - fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0,(4,1),(4,1)), strategy1=(0,(4,1))) + fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0, (4, 1), (4, 1)), + strategy1=(0, (4, 1))) fact.forward_cmp() + def test_reid_max_grad_input_256_64(): - fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0,(4,1),(4,1)), strategy1=(0,(4,1))) + fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0, (4, 1), (4, 1)), + strategy1=(0, (4, 1))) fact.grad_cmp() - + + def test_reid_max_forward_input_128_64_32_32(): - fact = MaxFactory(input_shape=(128, 64, 32, 32), axis=3, keep_dims=False, strategy0=(0,(2,1,2,1),(2,1,2,1)), strategy1=(0,(2,1,2,1))) + fact = MaxFactory(input_shape=(128, 64, 32, 32), axis=3, keep_dims=False, strategy0=(0, (2, 1, 2, 1), (2, 1, 2, 1)), + strategy1=(0, (2, 1, 2, 1))) fact.forward_cmp() + def test_reid_max_grad_input_128_64_32_32(): - fact = MaxFactory(input_shape=(128, 64, 32, 32), axis=3, keep_dims=False, strategy0=(0,(2,1,2,1),(2,1,2,1)), strategy1=(0,(2,1,2,1))) + fact = MaxFactory(input_shape=(128, 64, 32, 32), axis=3, keep_dims=False, strategy0=(0, (2, 1, 2, 1), (2, 1, 2, 1)), + strategy1=(0, (2, 1, 2, 1))) fact.grad_cmp() - + + def test_reid_max_forward_input_256_64_repeat(): - fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) + fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0, (2, 1), (2, 1)), + strategy1=(0, (2, 1))) fact.forward_cmp() + def test_reid_max_grad_input_256_64_repeat(): - fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) + fact = MaxFactory(input_shape=(256, 64), axis=1, keep_dims=False, strategy0=(0, (2, 1), (2, 1)), + strategy1=(0, (2, 1))) fact.grad_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/mul_softmax/need_fix_test_mul_softmax_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/mul_softmax/need_fix_test_mul_softmax_parallel_4p.py index efb0f92677..f005d577ce 100644 --- 
a/tests/ut/python/parallel/parallel_end_to_end/mul_softmax/need_fix_test_mul_softmax_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/mul_softmax/need_fix_test_mul_softmax_parallel_4p.py @@ -24,10 +24,11 @@ from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -36,9 +37,11 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - + + class MulSoftmax(Cell): def __init__(self, strategy0=None, strategy1=None, axis=0): super(MulSoftmax, self).__init__() @@ -65,23 +68,25 @@ class MulSoftmaxFactory: size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) self.input_np2 = 1.0 - self.output_grad_np = np.reshape((np.arange(0, size)%(number_range-10) - number_range/2)*0.1, input_shape).astype(np.float32) + self.output_grad_np = np.reshape((np.arange(0, size) % (number_range - 10) - number_range / 2) * 0.1, + input_shape).astype(np.float32) self.strategy0 = strategy0 self.strategy1 = strategy1 need_dev_num = 1 need_dev_num_ = 1 for s in strategy0[1]: - need_dev_num = need_dev_num*s + need_dev_num = need_dev_num * s for s in strategy1[1]: - need_dev_num_ = need_dev_num_*s - self.x_id = device_id%need_dev_num - self.y_id = device_id%need_dev_num - self.out_id = device_id%need_dev_num_ + need_dev_num_ = need_dev_num_ * s + self.x_id = device_id % need_dev_num + self.y_id = device_id % need_dev_num + self.out_id = device_id % need_dev_num_ def forward_mindspore_impl(self): net = MulSoftmax() @@ -98,10 +103,10 @@ class MulSoftmaxFactory: y = Tensor(self.input_np2, ms.float32) inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - y1 = Tensor(self.input_np2, ms.float32) + y1 = Tensor(self.input_np2, ms.float32) out = net(x, y, parallel_inputs_compile=[x, y], parallel_inputs_run=[x1, y1]) return out.asnumpy() - + def grad_mindspore_impl(self): output_grad = Tensor(self.output_grad_np) x = Tensor(self.input_np1) @@ -114,7 +119,7 @@ class MulSoftmaxFactory: def grad_mindspore_parallel_impl(self): output_grads = self.get_parallel_blocks(self.output_grad_np, self.strategy1[1]) - output_grad = Tensor(output_grads[self.out_id]) + output_grad = Tensor(output_grads[self.out_id]) x = Tensor(self.input_np1) y = Tensor(self.input_np2, ms.float32) net = MulSoftmax(strategy0=self.strategy0, strategy1=self.strategy1) @@ -125,25 +130,26 @@ class MulSoftmaxFactory: inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) y1 = Tensor(self.input_np2, ms.float32) - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], parallel_inputs_run=[x1, y1, output_grad]) + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad], + parallel_inputs_run=[x1, y1, output_grad]) return input_grad - + def get_parallel_blocks(self, 
input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def forward_cmp(self): out_mindspore = self.forward_mindspore_impl() out_mindspore_parallel = self.forward_mindspore_parallel_impl() - np.save(path+str(device_id)+"_"+self.prefix+"_forward_parallel.npy", out_mindspore_parallel) + np.save(path + str(device_id) + "_" + self.prefix + "_forward_parallel.npy", out_mindspore_parallel) out_blocks = self.get_parallel_blocks(out_mindspore, self.strategy1[1]) assert np.allclose(out_blocks[self.out_id], out_mindspore_parallel, 0.0001, 0.001) @@ -154,36 +160,41 @@ class MulSoftmaxFactory: input_grad_mindspore1 = input_grad_mindspore[1].asnumpy() input_grad_mindspore_parallel0 = input_grad_mindspore_parallel[0].asnumpy() input_grad_mindspore_parallel1 = input_grad_mindspore_parallel[1].asnumpy() - np.save(path+str(device_id)+"_"+self.prefix+"_grad_parallel0.npy", input_grad_mindspore_parallel0) - np.save(path+str(device_id)+"_"+self.prefix+"_grad_parallel1.npy", input_grad_mindspore_parallel1) - input_grad_blocks_0 = self.get_parallel_blocks(input_grad_mindspore0, self.strategy0[1])# here, of TensorMul's two inputs, X1 is not broadcast while X2 is + np.save(path + str(device_id) + "_" + self.prefix + "_grad_parallel0.npy", input_grad_mindspore_parallel0) + np.save(path + str(device_id) + "_" + self.prefix + "_grad_parallel1.npy", input_grad_mindspore_parallel1) + input_grad_blocks_0 = self.get_parallel_blocks(input_grad_mindspore0, + self.strategy0[1]) # here, of TensorMul's two inputs, X1 is not broadcast while X2 is assert np.allclose(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert np.allclose(input_grad_mindspore1, input_grad_mindspore_parallel1, 0.0001, 0.0001) - + + @pytest.mark.reid_forward def test_reid_mul_softmax_input_128x64(): - stra0 = (0,(1,4),()) - stra1 = (0,(1,4)) + stra0 = (0, (1, 4), ()) + stra1 = (0, (1, 4)) fact = MulSoftmaxFactory(input_shape=(128, 64), strategy0=stra0, strategy1=stra1) fact.forward_cmp() + @pytest.mark.reid_grad def test_reid_grad_mul_softmax_input_128x64(): - stra0 = (0,(1,4),()) - stra1 = (0,(1,4)) + stra0 = (0, (1, 4), ()) + stra1 = (0, (1, 4)) fact = MulSoftmaxFactory(input_shape=(128, 64), strategy0=stra0, strategy1=stra1) fact.grad_cmp() + @pytest.mark.reid_forward def test_reid_mul_softmax_input_128x64_all_to_all(): - stra0 = (0,(4,1),()) - stra1 = (0,(1,4)) + stra0 = (0, (4, 1), ()) + stra1 = (0, (1, 4)) fact = MulSoftmaxFactory(input_shape=(128, 64), strategy0=stra0, strategy1=stra1) fact.forward_cmp() + @pytest.mark.reid_grad def test_reid_grad_mul_softmax_input_128x64_all_to_all(): - stra0 = (0,(4,1),()) - stra1 = (0,(1,4)) + stra0 = (0, (4, 1), ()) + stra1 = (0, (1, 4)) fact = MulSoftmaxFactory(input_shape=(128, 64), strategy0=stra0, strategy1=stra1) fact.grad_cmp() diff --git a/tests/ut/python/parallel/parallel_end_to_end/onehot/_test_onehot_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/onehot/_test_onehot_parallel_4p.py index f60061e3ac..c9995ead95 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/onehot/_test_onehot_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/onehot/_test_onehot_parallel_4p.py @@ -23,10 +23,11 @@ from mindspore.ops import operations as P from mindspore.common.tensor import Tensor import mindspore.communication.management as distributedTool -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" +
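The OneHot factory below repeats the get_parallel_blocks pattern used throughout these files: the host tensor is split axis by axis according to the slicing strategy, and rank d consumes block d % need_dev_num. A hedged numpy sketch of that splitting, using blocks = temp as a compact equivalent of the pop/extend loop (the 4x4 shape and (2, 2) strategy are illustrative only):

    import numpy as np

    def get_parallel_blocks(input_, strategy):
        # Split into strategy[0] pieces along axis 0, then each piece into
        # strategy[1] pieces along axis 1, and so on; the resulting blocks
        # are ordered row-major over the cut grid.
        blocks = [input_]
        for axis, cuts in enumerate(strategy):
            temp = []
            while blocks:
                temp.extend(np.split(blocks.pop(0), cuts, axis=axis))
            blocks = temp
        return blocks

    full = np.arange(16).reshape(4, 4)
    blocks = get_parallel_blocks(full, (2, 2))      # four 2x2 blocks
    assert np.array_equal(blocks[0], full[:2, :2])  # what rank 0 consumes
    assert np.array_equal(blocks[3], full[2:, 2:])  # what rank 3 consumes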
def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -35,6 +36,7 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") @@ -57,10 +59,10 @@ class OneHotFactory: prefix = "" for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(10, size) - self.input_np = np.reshape(np.arange(0, size)%number_range, input_shape).astype(np.int32) + self.input_np = np.reshape(np.arange(0, size) % number_range, input_shape).astype(np.int32) self.depth = depth self.on_value = on_value self.off_value = off_value @@ -69,22 +71,22 @@ class OneHotFactory: self.strategy0 = strategy0 need_dev_num = 1 for s in strategy0[1]: - need_dev_num = need_dev_num*s - self.x_id = device_id%need_dev_num - self.out_id = device_id%need_dev_num - + need_dev_num = need_dev_num * s + self.x_id = device_id % need_dev_num + self.out_id = device_id % need_dev_num + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 - return blocks - + i += 1 + return blocks + def grad_mindspore_impl(self): output_grad = Tensor(self.output_grad_np) x = Tensor(self.input_np1) @@ -94,23 +96,23 @@ class OneHotFactory: grad_net.set_train() input_grad = grad_net(x, y, output_grad) return input_grad - + def forward_mindspore_impl(self): indices = Tensor(self.input_np) - net = Onehot(axis=self.axis, - depth=self.depth, - on_value=self.on_value, + net = Onehot(axis=self.axis, + depth=self.depth, + on_value=self.on_value, off_value=self.off_value) out = net(indices) return out.asnumpy() - + def forward_mindspore_parallel_impl(self): x = Tensor(self.input_np) inputs_x = self.get_parallel_blocks(self.input_np, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - net = Onehot(axis=self.axis, - depth=self.depth, - on_value=self.on_value, + net = Onehot(axis=self.axis, + depth=self.depth, + on_value=self.on_value, off_value=self.off_value, strategy=self.strategy0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() @@ -131,7 +133,7 @@ def test_reid_onehot_forward_int32_128_depth13000(): off_value=0.000000, axis=-1, dtype="float32", - strategy0=(0,(2,))) + strategy0=(0, (2,))) fact.forward_cmp() @@ -142,6 +144,5 @@ def test_reid_onehot_forward_int32_131072_depth127(): off_value=0.000000, axis=-1, dtype="float32", - strategy0=(0,(4,))) + strategy0=(0, (4,))) fact.forward_cmp() - diff --git a/tests/ut/python/parallel/parallel_end_to_end/prelu/_test_prelu_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/prelu/_test_prelu_parallel_4p.py index 4a0e8157b6..100f1cd7c2 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/prelu/_test_prelu_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/prelu/_test_prelu_parallel_4p.py @@ -24,21 +24,24 @@ from mindspore.ops import operations as P from mindspore.common.tensor import Tensor from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) context.set_auto_parallel_context(device_num=device_num, global_rank=device_id) 
distributedTool.init() - distributedTool.create_group("0-3", [0,1,2,3]) + distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - + + class PReLU(Cell): def __init__(self, channel=1, w=0.25, strategy_=None, strategy1_=None): super(PReLU, self).__init__() @@ -55,8 +58,8 @@ class Grad(Cell): super(Grad, self).__init__() self.network = network - def construct(self, input,z, w, output_grad): - return grad_all_with_sens(self.network)(input,z,w, output_grad) + def construct(self, input, z, w, output_grad): + return grad_all_with_sens(self.network)(input, z, w, output_grad) class PReLUFactory: @@ -66,11 +69,12 @@ class PReLUFactory: size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) - self.output_grad_np = np.reshape((np.arange(0, size)%(number_range-10) - number_range/2)*0.1, input_shape).astype(np.float32) + self.input_np = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype(np.float32) + self.output_grad_np = np.reshape((np.arange(0, size) % (number_range - 10) - number_range / 2) * 0.1, + input_shape).astype(np.float32) self.channel = c self.weight = np.array([np.float32(0.25)] * c) self.strategy = strategy @@ -84,28 +88,29 @@ class PReLUFactory: return out.asnumpy() def forward_mindspore_parallel_impl(self): - net = PReLU(channel=self.channel, w=self.weight, strategy_=self.strategy, strategy1_=(self.strategy[0], self.strategy[1], self.strategy[1])) + net = PReLU(channel=self.channel, w=self.weight, strategy_=self.strategy, + strategy1_=(self.strategy[0], self.strategy[1], self.strategy[1])) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() x = Tensor(self.input_np) z = Tensor(np.zeros(self.input_np.shape), ms.float32) w = Tensor(self.weight) - + inputs = self.get_parallel_blocks(self.input_np, self.strategy[1]) - block_id = device_id%len(inputs) + block_id = device_id % len(inputs) x1 = Tensor(inputs[block_id]) z1 = Tensor(np.zeros(inputs[block_id].shape), ms.float32) w1 = Tensor(self.weight) - - out = net(x, z, w, parallel_inputs_compile=[x, z, w], parallel_inputs_run=[x1, z1 ,w1]) + + out = net(x, z, w, parallel_inputs_compile=[x, z, w], parallel_inputs_run=[x1, z1, w1]) return out.asnumpy() - + def grad_mindspore_impl(self): output_grad = Tensor(self.output_grad_np) x = Tensor(self.input_np) z = Tensor(np.zeros(self.input_np.shape), ms.float32) w = Tensor(self.weight) - + net = PReLU(channel=self.channel, w=self.weight) grad_net = Grad(net) grad_net.set_train() @@ -114,43 +119,45 @@ class PReLUFactory: def grad_mindspore_parallel_impl(self): output_grads = self.get_parallel_blocks(self.output_grad_np, self.strategy[1]) - block_id = device_id%len(output_grads) - output_grad = Tensor(output_grads[block_id]) + block_id = device_id % len(output_grads) + output_grad = Tensor(output_grads[block_id]) x = Tensor(self.input_np) z = Tensor(np.zeros(self.input_np.shape), ms.float32) w = Tensor(self.weight) - - net = PReLU(channel=self.channel, w=self.weight, strategy_=self.strategy, strategy1_=(self.strategy[0], self.strategy[1], self.strategy[1])) + + net = PReLU(channel=self.channel, w=self.weight, strategy_=self.strategy, + strategy1_=(self.strategy[0], self.strategy[1], self.strategy[1])) grad_net = Grad(net) 
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_auto_parallel() - + grad_net.set_train() inputs = self.get_parallel_blocks(self.input_np, self.strategy[1]) x1 = Tensor(inputs[block_id]) z1 = Tensor(np.zeros(inputs[block_id].shape), ms.float32) w1 = Tensor(self.weight) - - input_grad = grad_net(x, z, w, output_grad, parallel_inputs_compile=[x, z, w, output_grad], parallel_inputs_run=[x1, z1, w1, output_grad]) + + input_grad = grad_net(x, z, w, output_grad, parallel_inputs_compile=[x, z, w, output_grad], + parallel_inputs_run=[x1, z1, w1, output_grad]) return input_grad - + def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def forward_cmp(self): out_mindspore = self.forward_mindspore_impl() out_mindspore_parallel = self.forward_mindspore_parallel_impl() out_blocks = self.get_parallel_blocks(out_mindspore, self.strategy[1]) - block_id = device_id%len(out_blocks) + block_id = device_id % len(out_blocks) assert np.allclose(out_blocks[block_id], out_mindspore_parallel, 0.0001, 0.001) def grad_cmp(self): @@ -164,34 +171,35 @@ class PReLUFactory: input_grad_mindspore_parallel2 = input_grad_mindspore_parallel[2].asnumpy() input_grad_blocks = self.get_parallel_blocks(input_grad_mindspore0, self.strategy[1]) input1_grad_blocks = self.get_parallel_blocks(input_grad_mindspore1, self.strategy[1]) - block_id = device_id%len(input_grad_blocks) + block_id = device_id % len(input_grad_blocks) assert np.allclose(input_grad_blocks[block_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert np.allclose(input_grad_mindspore2, input_grad_mindspore_parallel2, 0.0001, 0.0001) assert np.allclose(input1_grad_blocks[block_id], input_grad_mindspore_parallel1, 0.0001, 0.0001) - - + @pytest.mark.reid_grad def test_reid_prelu_input_128x64x112x112_repeat(): - stra = (0,(1,1,2,1),(1)) + stra = (0, (1, 1, 2, 1), (1)) fact = PReLUFactory(input_shape=(128, 64, 112, 112), strategy=stra) - fact.forward_cmp() - + fact.forward_cmp() + + @pytest.mark.reid_grad def test_reid_grad_prelu_input_128x64x112x112_repeat(): - stra = (0,(1,1,2,1),(1)) + stra = (0, (1, 1, 2, 1), (1)) fact = PReLUFactory(input_shape=(128, 64, 112, 112), strategy=stra) fact.grad_cmp() - + + @pytest.mark.reid_grad def test_reid_prelu_input_128x64x112x112_mix(): - stra = (0,(2,1,1,2),(1)) + stra = (0, (2, 1, 1, 2), (1)) fact = PReLUFactory(input_shape=(128, 64, 112, 112), strategy=stra) fact.forward_cmp() - + + @pytest.mark.reid_grad def test_reid_grad_prelu_input_128x64x112x112_mix(): - stra = (0,(2,1,1,2),(1)) + stra = (0, (2, 1, 1, 2), (1)) fact = PReLUFactory(input_shape=(128, 64, 112, 112), strategy=stra) fact.grad_cmp() - diff --git a/tests/ut/python/parallel/parallel_end_to_end/reshape/_test_reshape_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/reshape/_test_reshape_parallel_4p.py index 64582d02e8..a4d1eb0808 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/reshape/_test_reshape_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/reshape/_test_reshape_parallel_4p.py @@ -23,10 +23,11 @@ from mindspore.common.tensor import Tensor from numpy import allclose as allclose_nparray from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set 
up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -35,6 +36,7 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") @@ -49,7 +51,7 @@ class Grad(Cell): class Reshape(Cell): - def __init__(self, target_shape, strategy0 = None, strategy1 = None): + def __init__(self, target_shape, strategy0=None, strategy1=None): super(Reshape, self).__init__() self.add = P.TensorAdd(strategy=strategy0) self.reshape = P.Reshape(strategy=strategy1) @@ -66,59 +68,62 @@ class ReshapeFactory: size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) - self.input_np2 = np.reshape(np.arange(0, size)%number_range - number_range/4, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) + self.input_np2 = np.reshape(np.arange(0, size) % number_range - number_range / 4, input_shape).astype( + np.float32) target_size = 1 for s in target_shape: - target_size = target_size*s - number_range = min(1000, target_size) - self.output_grad_np = np.reshape(np.arange(0, target_size)%number_range - number_range/2, target_shape).astype(np.float32) + target_size = target_size * s + number_range = min(1000, target_size) + self.output_grad_np = np.reshape(np.arange(0, target_size) % number_range - number_range / 2, + target_shape).astype(np.float32) self.target_shape = target_shape self.strategy0 = strategy0 self.strategy1 = strategy1 - out_strategy = [1]*len(target_shape) + out_strategy = [1] * len(target_shape) out_strategy[0] = strategy1[1][0] self.out_strategy = out_strategy - + need_dev_num0 = 1 need_dev_num1 = 1 for s in strategy0[1]: - need_dev_num0 = need_dev_num0*s + need_dev_num0 = need_dev_num0 * s for s in out_strategy: - need_dev_num1 = need_dev_num1*s - self.x_id = device_id%need_dev_num0 - self.y_id = device_id%need_dev_num0 - self.out_id = device_id%need_dev_num1 + need_dev_num1 = need_dev_num1 * s + self.x_id = device_id % need_dev_num0 + self.y_id = device_id % need_dev_num0 + self.out_id = device_id % need_dev_num1 def get_parallel_blocks(self, input_, strategy): blocks = [input_] i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def forward_reshape_mindspore_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) net = Reshape(self.target_shape) out = net(x, y) return out.asnumpy() - + def forward_reshape_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) inputs_x = self.get_parallel_blocks(self.input_np1, self.strategy0[1]) inputs_y = self.get_parallel_blocks(self.input_np2, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - y1 = Tensor(inputs_y[self.y_id]) + y1 = Tensor(inputs_y[self.y_id]) net = Reshape(self.target_shape, strategy0=self.strategy0, strategy1=self.strategy1) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() @@ -134,7 +139,7 @@ class ReshapeFactory: grad_net.set_train() input_grad = grad_net(x, y, output_grad) return input_grad - + def grad_reshape_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = 
Tensor(self.input_np2) @@ -150,7 +155,8 @@ class ReshapeFactory: context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_auto_parallel() grad_net.set_train() - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], parallel_inputs_run=[x1, y1, output_grad1]) + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], + parallel_inputs_run=[x1, y1, output_grad1]) return input_grad def forward_reshape_cmp(self): @@ -171,22 +177,29 @@ class ReshapeFactory: assert allclose_nparray(input_grad_blocks_0[self.x_id], input_grad_mindspore_parallel0, 0.0001, 0.0001) assert allclose_nparray(input_grad_blocks_1[self.y_id], input_grad_mindspore_parallel1, 0.0001, 0.0001) + @pytest.mark.reid_forward def test_reid_reshape_input_128x512x7x7_target_128x25088(): - fact = ReshapeFactory(input_shape=(128, 512, 7, 7), target_shape=(128, 25088), strategy0=(0,(4,1,1,1),(4,1,1,1)), strategy1=(0,(4,1,1,1))) + fact = ReshapeFactory(input_shape=(128, 512, 7, 7), target_shape=(128, 25088), + strategy0=(0, (4, 1, 1, 1), (4, 1, 1, 1)), strategy1=(0, (4, 1, 1, 1))) fact.forward_reshape_cmp() + def test_reid_reshape_grad_input_128x512x7x7_target_128x25088(): - fact = ReshapeFactory(input_shape=(128, 512, 7, 7), target_shape=(128, 25088), strategy0=(0,(4,1,1,1),(4,1,1,1)), strategy1=(0,(4,1,1,1))) + fact = ReshapeFactory(input_shape=(128, 512, 7, 7), target_shape=(128, 25088), + strategy0=(0, (4, 1, 1, 1), (4, 1, 1, 1)), strategy1=(0, (4, 1, 1, 1))) fact.grad_reshape_cmp() + @pytest.mark.reid_forward def test_reid_reshape_input_128x64_target_128x64x1x1(): - fact = ReshapeFactory(input_shape=(128, 64), target_shape=(128, 64, 1, 1), strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) + fact = ReshapeFactory(input_shape=(128, 64), target_shape=(128, 64, 1, 1), strategy0=(0, (2, 1), (2, 1)), + strategy1=(0, (2, 1))) fact.forward_reshape_cmp() - + + @pytest.mark.reid_grad def test_reid_reshape_grad_input_128x64_target_128x64x1x1(): - fact = ReshapeFactory(input_shape=(128, 64), target_shape=(128, 64, 1, 1), strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) + fact = ReshapeFactory(input_shape=(128, 64), target_shape=(128, 64, 1, 1), strategy0=(0, (2, 1), (2, 1)), + strategy1=(0, (2, 1))) fact.grad_reshape_cmp() - diff --git a/tests/ut/python/parallel/parallel_end_to_end/transpose/_test_transpose_parallel_4p.py b/tests/ut/python/parallel/parallel_end_to_end/transpose/_test_transpose_parallel_4p.py index 8a77d23c2b..2efccab47d 100644 --- a/tests/ut/python/parallel/parallel_end_to_end/transpose/_test_transpose_parallel_4p.py +++ b/tests/ut/python/parallel/parallel_end_to_end/transpose/_test_transpose_parallel_4p.py @@ -23,10 +23,11 @@ from mindspore.common.tensor import Tensor from numpy import allclose as allclose_nparray from mindspore.ops.composite import grad_all_with_sens -device_num=4 +device_num = 4 device_id = int(os.environ["RANK_ID"]) path = "./output/" + def setup_module(): print("~~~~~~~~~~~set up~~~~~~~~~~~~~") context.set_context(mode=context.GRAPH_MODE) @@ -35,13 +36,13 @@ def setup_module(): distributedTool.create_group("0-3", [0, 1, 2, 3]) print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~") + def teardown_module(): print("~~~~~~~~~~~~tear down~~~~~~~~~~") - class Net(Cell): - def __init__(self, perm_in, strategy0 = None, strategy1 = None): + def __init__(self, perm_in, strategy0=None, strategy1=None): super(Net, self).__init__() self.add = P.TensorAdd(strategy=strategy0) self.transpose = P.Transpose(strategy=strategy1) @@ -51,6 +52,7 @@ 
class Net(Cell): out = self.add(x, y) return self.transpose(out, self.perm_in) + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -60,41 +62,44 @@ class Grad(Cell): return grad_all_with_sens(self.network)(x, y, output_grad) - class TransposeFactory: def __init__(self, input_shape, perm_in, strategy0, strategy1): prefix = "" size = 1 for s in input_shape: prefix = prefix + str(s) - size = size*s + size = size * s self.prefix = prefix number_range = min(1000, size) - self.input_np1 = np.reshape(np.arange(0, size)%number_range - number_range/2, input_shape).astype(np.float32) - self.input_np2 = np.reshape(np.arange(0, size)%number_range - number_range/4, input_shape).astype(np.float32) + self.input_np1 = np.reshape(np.arange(0, size) % number_range - number_range / 2, input_shape).astype( + np.float32) + self.input_np2 = np.reshape(np.arange(0, size) % number_range - number_range / 4, input_shape).astype( + np.float32) target_shape = self.input_np1.transpose(perm_in).shape target_size = 1 for s in target_shape: - target_size = target_size*s - number_range = min(1000, target_size) + target_size = target_size * s + number_range = min(1000, target_size) self.target_shape = target_shape - self.output_grad_np = np.reshape(np.arange(0, target_size)%number_range - number_range/2, target_shape).astype(np.float32) + self.output_grad_np = np.reshape(np.arange(0, target_size) % number_range - number_range / 2, + target_shape).astype(np.float32) self.perm_in = perm_in - self.strategy0=strategy0 - self.strategy1=strategy1 - out_strategy=[] + self.strategy0 = strategy0 + self.strategy1 = strategy1 + out_strategy = [] for i in perm_in: out_strategy.append(strategy1[1][i]) self.out_strategy = out_strategy need_dev_num0 = 1 need_dev_num1 = 1 for s in strategy0[1]: - need_dev_num0 = need_dev_num0*s + need_dev_num0 = need_dev_num0 * s for s in out_strategy: - need_dev_num1 = need_dev_num1*s - self.x_id = device_id%need_dev_num0 - self.y_id = device_id%need_dev_num0 - device_index = self.id_to_list(device_id%need_dev_num1, self.strategy1[1]) #encoding to get the index before transpose + need_dev_num1 = need_dev_num1 * s + self.x_id = device_id % need_dev_num0 + self.y_id = device_id % need_dev_num0 + device_index = self.id_to_list(device_id % need_dev_num1, + self.strategy1[1]) # encoding to get the index before transpose device_index_transpose = [] for i in perm_in: device_index_transpose.append(device_index[i]) @@ -105,53 +110,53 @@ class TransposeFactory: i = 0 for stra in strategy: temp = [] - while len(blocks)>0: + while len(blocks) > 0: block = blocks.pop(0) temp.extend(np.split(block, stra, axis=i)) blocks.extend(temp) - i+=1 + i += 1 return blocks - + def id_to_list(self, id, shape): result = [] r = id for i in range(0, len(shape)): v = 1 - for j in range(i+1, len(shape)): - v = v*shape[j] - result.append(r//v) - r = r%v + for j in range(i + 1, len(shape)): + v = v * shape[j] + result.append(r // v) + r = r % v return result - + def list_to_id(self, id_list, shape): result = 0 for i in range(0, len(id_list)): v = 1 - for j in range(i+1, len(id_list)): - v = v*shape[j] - result = result + id_list[i]*v + for j in range(i + 1, len(id_list)): + v = v * shape[j] + result = result + id_list[i] * v return result - + def forward_mindspore_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) net = Net(self.perm_in) out = net(x, y) return out.asnumpy() - + def forward_mindspore_parallel_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) inputs_x = 
self.get_parallel_blocks(self.input_np1, self.strategy0[1]) inputs_y = self.get_parallel_blocks(self.input_np2, self.strategy0[1]) x1 = Tensor(inputs_x[self.x_id]) - y1 = Tensor(inputs_y[self.y_id]) + y1 = Tensor(inputs_y[self.y_id]) net = Net(self.perm_in, strategy0=self.strategy0, strategy1=self.strategy1) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() out = net(x, y, parallel_inputs_compile=[x, y], parallel_inputs_run=[x1, y1]) return out.asnumpy() - + def grad_mindspore_impl(self): x = Tensor(self.input_np1) y = Tensor(self.input_np2) @@ -177,7 +182,8 @@ class TransposeFactory: context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") grad_net.set_auto_parallel() grad_net.set_train() - input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], parallel_inputs_run=[x1, y1, output_grad1]) + input_grad = grad_net(x, y, output_grad, parallel_inputs_compile=[x, y, output_grad1], + parallel_inputs_run=[x1, y1, output_grad1]) return input_grad def forward_transpose_cmp(self): @@ -200,25 +206,30 @@ class TransposeFactory: def test_reid_transpose_input_256x512_output_512x256_perm_1x0(): - fact = TransposeFactory((256, 512), (1, 0), strategy0=(0,(2,2),(2,2)), strategy1=(0,(2,2))) + fact = TransposeFactory((256, 512), (1, 0), strategy0=(0, (2, 2), (2, 2)), strategy1=(0, (2, 2))) fact.forward_transpose_cmp() + def test_reid_grad_transpose_input_256x512_output_512x256_perm_1x0(): - fact = TransposeFactory((256, 512), (1, 0), strategy0=(0,(2,2),(2,2)), strategy1=(0,(2,2))) + fact = TransposeFactory((256, 512), (1, 0), strategy0=(0, (2, 2), (2, 2)), strategy1=(0, (2, 2))) fact.grad_transpose_cmp() + def test_reid_transpose_input_512x256_output_256x512_perm_1x0(): - fact = TransposeFactory((512, 256), (1, 0), strategy0=(0,(4,1),(4,1)), strategy1=(0,(1,4))) + fact = TransposeFactory((512, 256), (1, 0), strategy0=(0, (4, 1), (4, 1)), strategy1=(0, (1, 4))) fact.forward_transpose_cmp() - + + def test_reid_grad_transpose_input_512x256_output_256x512_perm_1x0(): - fact = TransposeFactory((512, 256), (1, 0), strategy0=(0,(4,1),(4,1)), strategy1=(0,(1,4))) + fact = TransposeFactory((512, 256), (1, 0), strategy0=(0, (4, 1), (4, 1)), strategy1=(0, (1, 4))) fact.grad_transpose_cmp() + def test_reid_transpose_input_512x256_output_256x512_perm_1x0_repeat(): - fact = TransposeFactory((512, 256), (1, 0), strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) + fact = TransposeFactory((512, 256), (1, 0), strategy0=(0, (2, 1), (2, 1)), strategy1=(0, (2, 1))) fact.forward_transpose_cmp() - + + def test_reid_grad_transpose_input_512x256_output_256x512_perm_1x0_repeat(): - fact = TransposeFactory((512, 256), (1, 0), strategy0=(0,(2,1),(2,1)), strategy1=(0,(2,1))) - fact.grad_transpose_cmp() + fact = TransposeFactory((512, 256), (1, 0), strategy0=(0, (2, 1), (2, 1)), strategy1=(0, (2, 1))) + fact.grad_transpose_cmp() diff --git a/tests/ut/python/parallel/test_add_relu_redistribution.py b/tests/ut/python/parallel/test_add_relu_redistribution.py index 51f04b14d0..1288efe29e 100644 --- a/tests/ut/python/parallel/test_add_relu_redistribution.py +++ b/tests/ut/python/parallel/test_add_relu_redistribution.py @@ -21,6 +21,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class AddRelu(nn.Cell): def __init__(self, strategy0=None, strategy1=None): super(AddRelu, self).__init__() @@ -31,6 +32,7 @@ class AddRelu(nn.Cell): out = self.add(x, z) return self.relu(out) + class 
NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -41,6 +43,7 @@ class NetWithLoss(nn.Cell): predict = self.network(x, z) return self.loss(predict) + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -57,9 +60,9 @@ def compile(net, x, y): def test_add_relu_stride_slice(): context.set_auto_parallel_context(device_num=8, global_rank=7) - + strategy0 = ((1, 1), (1, 1)) - strategy1 = ((8, 1), ) + strategy1 = ((8, 1),) net = Grad(NetWithLoss(AddRelu(strategy0, strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -67,14 +70,15 @@ def test_add_relu_stride_slice(): y = Tensor(np.ones([128, 32]), dtype=ms.float32) compile(net, x, y) + def test_add_relu_all_gather(): context.set_auto_parallel_context(device_num=8, global_rank=7) - + strategy0 = ((8, 1), (8, 1)) - strategy1 = ((1, 1), ) + strategy1 = ((1, 1),) net = Grad(NetWithLoss(AddRelu(strategy0, strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([128, 32]), dtype=ms.float32) - compile(net, x, y) \ No newline at end of file + compile(net, x, y) diff --git a/tests/ut/python/parallel/test_allreduce_fusion.py b/tests/ut/python/parallel/test_allreduce_fusion.py index b8bf9ccc0f..6c5413105f 100644 --- a/tests/ut/python/parallel/test_allreduce_fusion.py +++ b/tests/ut/python/parallel/test_allreduce_fusion.py @@ -86,6 +86,7 @@ class DenseNet2(nn.Cell): z = self.fc8(w) return z + class SimpleDMLNet(nn.Cell): def __init__(self, net1, net2): super(SimpleDMLNet, self).__init__() @@ -103,10 +104,11 @@ def train_common(net): learning_rate = 0.1 momentum = 0.9 epoch_size = 2 - device_num=4 + device_num = 4 context.reset_auto_parallel_context() auto_parallel_context().set_enable_all_reduce_fusion(enable_all_reduce_fusion=True) - context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_num, parameter_broadcast=False) + context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_num, + parameter_broadcast=False) context.set_context(mode=context.GRAPH_MODE) predict = Tensor(np.ones([batch_size, 128]), dtype=ms.float32) @@ -155,10 +157,12 @@ def test_allreduce_fusion_parameters(): assert (tail_time == 0.1) cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_inherent_time=0.2) - allreduce_inherent_time = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_allreduce_inherent_time') + allreduce_inherent_time = cost_model_context.get_cost_model_context( + 'costmodel_allreduce_fusion_allreduce_inherent_time') assert (allreduce_inherent_time == 0.2) cost_model_context.reset_cost_model_context() - allreduce_inherent_time = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_allreduce_inherent_time') + allreduce_inherent_time = cost_model_context.get_cost_model_context( + 'costmodel_allreduce_fusion_allreduce_inherent_time') assert (allreduce_inherent_time == 0.1) cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_bandwidth=0.2) @@ -169,10 +173,12 @@ def test_allreduce_fusion_parameters(): assert (allreduce_bandwidth == 0.1) cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_computation_time_parameter=0.2) - computation_time_parameter = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_computation_time_parameter') + computation_time_parameter = 
cost_model_context.get_cost_model_context( + 'costmodel_allreduce_fusion_computation_time_parameter') assert (computation_time_parameter == 0.2) cost_model_context.reset_cost_model_context() - computation_time_parameter = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_computation_time_parameter') + computation_time_parameter = cost_model_context.get_cost_model_context( + 'costmodel_allreduce_fusion_computation_time_parameter') assert (computation_time_parameter == 0.1) @@ -288,8 +294,7 @@ def test_allreduce_fusion5(): 'backbone1.fc4.weight': 2, 'backbone1.fc3.weight': 2, 'backbone1.fc2.weight': 1, - 'backbone1.fc1.weight': 1,} + 'backbone1.fc1.weight': 1, } assert (allreduce_fusion_dict == expect_dict) cost_model_context.reset_cost_model_context() - diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py index 7365dd941c..9ac4209173 100644 --- a/tests/ut/python/parallel/test_alltoall.py +++ b/tests/ut/python/parallel/test_alltoall.py @@ -27,7 +27,6 @@ from mindspore.parallel._utils import _reset_op_id from mindspore.common.api import _executor - class Dataset(MindData): def __init__(self, predict, label, length=3): super(Dataset, self).__init__(size=length) @@ -81,7 +80,7 @@ def all_to_all_common(strategy1): loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) loss.softmax_cross_entropy.set_strategy(((8, 1), (8, 1))) - loss.one_hot.set_strategy(((8,1), (), ())) + loss.one_hot.set_strategy(((8, 1), (), ())) opt = Momentum(net.trainable_params(), learning_rate, momentum) model = Model(net, loss, opt) @@ -91,20 +90,22 @@ def all_to_all_common(strategy1): def test_all_to_all(): - strategy1 = ((8, 1), ) + strategy1 = ((8, 1),) context.set_context(mode=context.GRAPH_MODE, save_graphs=False) _reset_op_id() strategys = all_to_all_common(strategy1) print(strategys) expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' '/SoftmaxCrossEntropyWithLogits-op3': [[8, 1], [8, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [[8, 1], [], []], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [[8, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [[1, 1], [1, 8]]} + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [ + [8, 1], [], []], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [ + [8, 1]], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [ + [1, 1], [1, 8]]} assert (strategys == expect_dict) context.set_context(save_graphs=False) if __name__ == '__main__': test_all_to_all() - diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py index 5b5e2e1144..3856dd55ff 100644 --- a/tests/ut/python/parallel/test_arithmetic.py +++ b/tests/ut/python/parallel/test_arithmetic.py @@ -64,7 +64,6 @@ def test_matmul_sub(): strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -143,6 +142,7 @@ def test_matmul_div(): b = Tensor(np.ones([64, 64]), dtype=ms.float32) compile(net, x, y, b) + def test_matmul_greater(): 
class Net(nn.Cell): def __init__(self, strategy1, strategy2): @@ -166,6 +166,7 @@ def test_matmul_greater(): b = Tensor(np.ones([64, 64]), dtype=ms.float32) compile(net, x, y, b) + def test_matmul_add_broadcast(): class Net(nn.Cell): def __init__(self, strategy1, strategy2): @@ -180,7 +181,7 @@ def test_matmul_add_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -228,7 +229,7 @@ def test_matmul_sub_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -276,7 +277,7 @@ def test_matmul_mul_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -324,7 +325,7 @@ def test_matmul_div_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -357,6 +358,7 @@ def test_matmul_div_broadcast2(): b = Tensor(np.ones([1, 64]), dtype=ms.float32) compile(net, x, y, b) + def test_matmul_greater_broadcast(): class Net(nn.Cell): def __init__(self, strategy1, strategy2): @@ -371,7 +373,7 @@ def test_matmul_greater_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -404,6 +406,7 @@ def test_matmul_greater_broadcast2(): b = Tensor(np.ones([1, 64]), dtype=ms.float32) compile(net, x, y, b) + def test_matmul_floordiv(): class Net(nn.Cell): def __init__(self, strategy1, strategy2): @@ -442,7 +445,7 @@ def test_matmul_floordiv_broadcast(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), (2, )) + strategy2 = ((4, 2), (2,)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -483,10 +486,10 @@ def test_assign_sub(): self.assign_sub = P.AssignSub() self.mul = P.Mul() self.mul_weight = Parameter(Tensor(np.full([128, 32], - 0.5, dtype=np.float32)), + 0.5, dtype=np.float32)), name="mul_weight") self.assignsub_weight = Parameter(Tensor(np.full([128, 32], - 1.1, dtype=np.float32)), + 1.1, dtype=np.float32)), name="assignsub_weight") def construct(self, x, y, z): diff --git a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py index 77f1386e8c..4b6dbb6c18 100644 --- a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py +++ b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py @@ -22,6 +22,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class NetWithLoss(nn.Cell): def __init__(self, 
network): super(NetWithLoss, self).__init__() @@ -37,6 +38,7 @@ def bn_with_initialize(out_channels): bn = nn.BatchNorm2d(out_channels, momentum=0.1, eps=1e-5).add_flags_recursive(fp32=True) return bn + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -46,6 +48,8 @@ class GradWrap(nn.Cell): return C.grad_all(self.network)(x) # model_parallel test + + def test_auto_parallel_bn_with_prelu(): class Net(nn.Cell): def __init__(self): @@ -58,11 +62,10 @@ def test_auto_parallel_bn_with_prelu(): out = self.prelu(out) return out - size = 8 context.set_auto_parallel_context(device_num=size, global_rank=0) - - x = Tensor(np.random.rand(16, 16, 32, 64),dtype=ms.float32) + + x = Tensor(np.random.rand(16, 16, 32, 64), dtype=ms.float32) net = GradWrap(NetWithLoss(Net())) context.set_auto_parallel_context(parallel_mode="auto_parallel") diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py index 00473a5c7e..e0d763e524 100644 --- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py +++ b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py @@ -23,7 +23,10 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.parallel._utils import _reset_op_id as reset_op_id from mindspore import context + context.set_context(mode=context.GRAPH_MODE) + + class NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -72,9 +75,10 @@ def test_auto_parallel_arithmetic(): compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) expected_strategies = {'Default/network-Net/FloorDiv-op0': [[2, 4], [2, 4]], - 'Default/network-Net/MatMul-op1': [[2, 1], [1, 4]]} + 'Default/network-Net/MatMul-op1': [[2, 1], [1, 4]]} assert strategies == expected_strategies + def test_auto_parallel_arithmetic_broadcast_both(): class Net(nn.Cell): def __init__(self): diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py index 3de0596a1d..240e0730e6 100644 --- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py +++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py @@ -24,6 +24,7 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.parallel._utils import _reset_op_id as reset_op_id + class NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -34,6 +35,7 @@ class NetWithLoss(nn.Cell): predict = self.network(x) return self.loss(predict) + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -42,13 +44,14 @@ class GradWrap(nn.Cell): def construct(self, x): return C.grad_all(self.network)(x) - # model_parallel test + + def test_auto_parallel_assign_sub_with_ref_key(): size = 8 context.set_auto_parallel_context(device_num=size, global_rank=0) - - x = Tensor(np.random.rand(4, 4, 32, 64),dtype=ms.float32) + + x = Tensor(np.random.rand(4, 4, 32, 64), dtype=ms.float32) net = NetWithLoss(nn.PReLU(4)) context.set_auto_parallel_context(parallel_mode="auto_parallel") @@ -62,4 +65,3 @@ def test_auto_parallel_assign_sub_with_ref_key(): assert v == [[1, 1, 1, 8], [1]] elif re.search('ReLU-op', k) is not None: assert v == [[1]] - diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py index 841d28c9aa..3bd8d5a5af 
100644 --- a/tests/ut/python/parallel/test_auto_parallel_cast.py +++ b/tests/ut/python/parallel/test_auto_parallel_cast.py @@ -24,6 +24,7 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.parallel._utils import _reset_op_id as reset_op_id + class NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -34,6 +35,7 @@ class NetWithLoss(nn.Cell): predict = self.network(x, y, z, w) return self.loss(predict) + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -43,6 +45,8 @@ class GradWrap(nn.Cell): return C.grad_all(self.network)(x, y, z, w) # model_parallel test + + def test_double_star_graph(): class Net(nn.Cell): def __init__(self): @@ -53,7 +57,6 @@ def test_double_star_graph(): self.cast1 = P.Cast() self.cast2 = P.Cast() - def construct(self, x, y, z, w): m1_result = self.matmul1(x, y) m2_result = self.matmul2(z, w) @@ -63,7 +66,7 @@ def test_double_star_graph(): size = 8 context.set_auto_parallel_context(device_num=size, global_rank=0) - + x = Tensor(np.ones([32, 8]), dtype=ms.float32) y = Tensor(np.ones([8, 16]), dtype=ms.float32) z = Tensor(np.ones([8, 16]), dtype=ms.float32) diff --git a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py index cf454ccc49..46e08935ca 100644 --- a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py +++ b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py @@ -22,6 +22,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell): predict = self.network(x, y, z) return self.loss(predict) + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -41,6 +43,8 @@ class GradWrap(nn.Cell): return C.grad_all(self.network)(x, y, z) # model_parallel test + + def test_common_parameter(): class Net(nn.Cell): def __init__(self): @@ -64,4 +68,4 @@ def test_common_parameter(): net = GradWrap(NetWithLoss(Net())) context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() - _executor.compile(net, x, y, z) \ No newline at end of file + _executor.compile(net, x, y, z) diff --git a/tests/ut/python/parallel/test_auto_parallel_double_star.py b/tests/ut/python/parallel/test_auto_parallel_double_star.py index 852726ad45..b31c866396 100644 --- a/tests/ut/python/parallel/test_auto_parallel_double_star.py +++ b/tests/ut/python/parallel/test_auto_parallel_double_star.py @@ -22,6 +22,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class NetWithLoss(nn.Cell): def __init__(self, network): super(NetWithLoss, self).__init__() @@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell): predict = self.network(x, y, z, w, a, b, c) return self.loss(predict) + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -41,6 +43,8 @@ class GradWrap(nn.Cell): return C.grad_all(self.network)(x, y, z, w, a, b, c) # model_parallel test + + def test_double_star_graph(): class Net(nn.Cell): def __init__(self): diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py index d842bca6f4..a08e45ec0f 100644 --- 
--- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
@@ -12,6 +12,7 @@ from mindspore.common.api import _executor
 from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -19,12 +20,14 @@ class Net(nn.Cell):
         self.relu = P.ReLU()
         self.wd = Parameter(Tensor(np.ones([8, 8, 8, 8]).astype(np.float32)), name="wide")
         self.wt = Parameter(Tensor(np.ones([8, 8, 8, 8]).astype(np.float32)), name="l")
+
     def construct(self, x):
         out = self.mul(x, self.wd)
         out = self.mul(out, self.wt)
         out = self.relu(out)
         return out
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -38,6 +41,7 @@ class NetWithLoss(nn.Cell):
         loss2 = self.mean(predict, -1)
         return loss1, loss2
 
+
 class IthOutputCell(nn.Cell):
     def __init__(self, network, output_index):
         super(IthOutputCell, self).__init__()
@@ -48,6 +52,7 @@ class IthOutputCell(nn.Cell):
         predict = self.network(x)[self.output_index]
         return predict
 
+
 class TrainStepWarp(nn.Cell):
     def __init__(self, network, sens=1000.0):
         super(TrainStepWarp, self).__init__()
@@ -82,6 +87,7 @@ class TrainStepWarp(nn.Cell):
         grads_d = self.grad_d(self.loss_net_d, weights_d)(x, sens_d)
         return F.depend(loss_w, self.optimizer_w(grads_w)), F.depend(loss_d, self.optimizer_d(grads_d))
 
+
 def test_double_subgraphs():
     cost_model_context.set_cost_model_context(multi_subgraphs=True)
     context.set_context(save_graphs=True)
diff --git a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
index a4e127693c..fd7d4ff527 100644
--- a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
+++ b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -41,6 +43,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, b)
 
 # model_parallel test
+
+
 def test_two_matmul():
     class Net(nn.Cell):
         def __init__(self):
diff --git a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
index 5efcda9cbb..a12d41b0b9 100644
--- a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, z, w, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -45,8 +47,9 @@ def compile(net, x, y, z, w, b):
     net.set_auto_parallel()
     _executor.compile(net, x, y, z, w, b)
 
-
 # model_parallel test
+
+
 def test_four_matmul_linear():
     class Net(nn.Cell):
         def __init__(self):
@@ -117,7 +120,7 @@ def test_four_matmul2():
 
     size = 16
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-
+
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     z = Tensor(np.ones([128, 64]), dtype=ms.float32)
diff --git a/tests/ut/python/parallel/test_auto_parallel_inference.py b/tests/ut/python/parallel/test_auto_parallel_inference.py
index 0d8cdabb6b..162dbf626e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_inference.py
+++ b/tests/ut/python/parallel/test_auto_parallel_inference.py
@@ -7,6 +7,7 @@ from mindspore.nn import WithLossCell, TrainOneStepCell
 from mindspore.nn import Momentum
 from mindspore.parallel._cost_model_context import set_cost_model_context
 
+
 class Net(nn.Cell):
     def __init__(self, input_ch, out_ch):
         super(Net, self).__init__()
@@ -18,6 +19,7 @@ class Net(nn.Cell):
         x = self.relu(x)
         return x
 
+
 def test_inference_phase():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
@@ -34,4 +36,4 @@ def test_inference_phase():
     train_network.set_train()
     train_network.set_auto_parallel()
 
-    output = train_network(predict, label)
\ No newline at end of file
+    output = train_network(predict, label)
diff --git a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
index dbe32cd825..8ba645fa11 100644
--- a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
+++ b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
@@ -70,4 +70,3 @@ def test_auto_parallel_l2normalize():
     y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y, b, phase='train')
-
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
index 6c7e5ba6f3..cf6d4ac40f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
@@ -24,6 +24,7 @@ from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -34,6 +35,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -43,6 +45,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, b)
 
 # model_parallel test
+
+
 def test_matmul_prelu():
     class Net(nn.Cell):
         def __init__(self):
@@ -73,5 +77,3 @@ def test_matmul_prelu():
             assert v == [[16, 1, 1, 1], [1]]
         elif re.search('Mul-op', k) is not None:
             assert v == [[16, 1, 1, 1], [16, 1, 1, 1]]
-
-
diff --git a/tests/ut/python/parallel/test_auto_parallel_onehot.py b/tests/ut/python/parallel/test_auto_parallel_onehot.py
index ebf9e40546..175e12aec1 100644
--- a/tests/ut/python/parallel/test_auto_parallel_onehot.py
+++ b/tests/ut/python/parallel/test_auto_parallel_onehot.py
@@ -26,7 +26,6 @@ from tests.dataset_mock import MindData
 from mindspore.train import Model, ParallelMode
 from mindspore.nn.optim.momentum import Momentum
 
-
 context.set_context(mode=context.GRAPH_MODE)
 
 
@@ -70,6 +69,7 @@ class GradWrap(nn.Cell):
     def construct(self, x, y, b):
         return C.grad_all(self.network)(x, y, b)
 
+
 def test_auto_parallel_arithmetic():
     class Net(nn.Cell):
         def __init__(self):
@@ -125,4 +125,4 @@ def test_auto_parallel_arithmetic_model():
     opt = Momentum(net.trainable_params(), 0.1, 0.9)
     model = Model(net, optimizer=opt)
-    model.train(2, dataset, dataset_sink_mode=False)
\ No newline at end of file
+    model.train(2, dataset, dataset_sink_mode=False)
diff --git a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
index 8b2840a9b0..d179c94f27 100644
--- a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
@@ -26,6 +26,7 @@ from mindspore import Tensor, Parameter
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from mindspore.parallel import set_algo_parameters
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -36,6 +37,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, z, w)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -45,6 +47,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, z, w)
 
 # model_parallel test
+
+
 def test_common_parameter():
     class Net(nn.Cell):
         def __init__(self):
@@ -56,7 +60,6 @@ def test_common_parameter():
             self.cast1 = P.Cast()
             self.cast2 = P.Cast()
 
-
         def construct(self, x, y, z, w):
             m1_result = self.matmul1(x, self.cast1(self.weight1, mstype.float32))
             m2_result = self.matmul2(z, self.cast2(self.weight1, mstype.float32))
@@ -66,14 +69,13 @@ def test_common_parameter():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-
+
     set_algo_parameters(elementwise_op_strategy_follow=True)
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64]), dtype=ms.float32)
     z = Tensor(np.ones([64, 64]), dtype=ms.float32)
     w = Tensor(np.ones([64, 64]), dtype=ms.float32)
 
-
     net = NetWithLoss(Net())
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
diff --git a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
index 217e13586d..96f5205aff 100644
--- a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
+++ b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, z, w, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -41,6 +43,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, z, w, b)
 
 # model_parallel test
+
+
 def test_four_matmul_linear():
     class Net(nn.Cell):
         def __init__(self, strategy1):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
index 560e5abc14..e53354d492 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
@@ -72,6 +72,7 @@ def test_sum_mul():
     b = Tensor(np.ones([32, 64]), dtype=ms.float32)
     compile(net, x, y, b)
 
+
 def test_sum_mul2():
     class Net(nn.Cell):
         def __init__(self):
@@ -95,6 +96,7 @@ def test_sum_mul2():
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
     compile(net, x, y, b)
 
+
 def test_sum_mul3():
     class Net(nn.Cell):
         def __init__(self):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py
index bb2116eec2..e41138bd0d 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py
@@ -34,6 +34,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -42,6 +43,7 @@ class GradWrap(nn.Cell):
     def construct(self, x):
         return C.grad_all(self.network)(x)
 
+
 # core dump, step_auto_parallel should SetInputs for transpose axis
 def test_reshape_matmul():
     class Net(nn.Cell):
@@ -58,13 +60,14 @@ def test_reshape_matmul():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32)
+    x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     _executor.compile(net, x)
 
+
 def test_reshape_auto_1():
     class Net(nn.Cell):
         def __init__(self):
@@ -82,13 +85,14 @@ def test_reshape_auto_1():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32)
+    x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     _executor.compile(net, x)
 
+
 def test_reshape_auto_2():
     class Net(nn.Cell):
         def __init__(self):
@@ -96,7 +100,7 @@ def test_reshape_auto_2():
             self.relu = P.ReLU()
             self.reshape = P.Reshape()
             self.matmul = P.MatMul()
-            self.add_weight = Parameter(Tensor(np.ones([128, 32]), dtype=ms.float32), name="weight1")
+            self.add_weight = Parameter(Tensor(np.ones([128, 32]), dtype=ms.float32), name="weight1")
             self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
 
         def construct(self, x):
@@ -109,13 +113,14 @@ def test_reshape_auto_2():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32)
+    x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     _executor.compile(net, x)
 
+
 def test_reshape_auto_3():
     class Net(nn.Cell):
         def __init__(self):
@@ -133,13 +138,14 @@ def test_reshape_auto_3():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([8*size, 28]), dtype=ms.float32)
+    x = Tensor(np.ones([8 * size, 28]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     _executor.compile(net, x)
 
+
 def test_reshape_auto_4():
     class Net(nn.Cell):
         def __init__(self):
@@ -147,7 +153,7 @@ def test_reshape_auto_4():
             self.relu = P.ReLU()
             self.reshape = P.Reshape()
             self.matmul = P.MatMul()
-            self.matmul_weight = Parameter(Tensor(np.ones([28*64]), dtype=ms.float32), name="weight")
+            self.matmul_weight = Parameter(Tensor(np.ones([28 * 64]), dtype=ms.float32), name="weight")
 
         def construct(self, x):
             out = self.relu(x)
@@ -158,7 +164,7 @@ def test_reshape_auto_4():
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32)
+    x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
@@ -192,29 +198,30 @@ def test_reshape_auto_5():
             self.mul = P.Mul()
             self.reshape = P.Reshape()
             self.reduce_sum = P.ReduceSum()
-            self.wide_w = Parameter(Tensor(np.ones([4, 1024*8, 64]), dtype=ms.float32), name="weight")
+            self.wide_w = Parameter(Tensor(np.ones([4, 1024 * 8, 64]), dtype=ms.float32), name="weight")
 
         def construct(self, x, y):
-            mask = self.reshape(y, (4, 1024*8, 1))
+            mask = self.reshape(y, (4, 1024 * 8, 1))
             w_id = self.relu(x)
             wx = self.mul(w_id, mask)
-            wide_out = self.reshape(self.reduce_sum(wx, 1), (-1,1))
+            wide_out = self.reshape(self.reduce_sum(wx, 1), (-1, 1))
             deep_id = x + self.wide_w
             vx = self.mul(deep_id, mask)
-            deep_in = self.reshape(vx, (-1, 1024*8*64))
+            deep_in = self.reshape(vx, (-1, 1024 * 8 * 64))
             out = wide_out + deep_in
             return out
 
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    x = Tensor(np.ones([4, 1024*size, 1]), dtype=ms.float32)
-    y = Tensor(np.ones([4, 1024*size,]), dtype=ms.float32)
+    x = Tensor(np.ones([4, 1024 * size, 1]), dtype=ms.float32)
+    y = Tensor(np.ones([4, 1024 * size, ]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     _executor.compile(net, x, y)
 
+
 def test_reshape_auto_6():
     class NetWithLoss(nn.Cell):
         def __init__(self, network):
@@ -245,7 +252,7 @@ def test_reshape_auto_6():
 
         def construct(self, x, y):
             out1 = x + self.wide_w
-            w = self.reshape(self.wide_w, (4,1024))
+            w = self.reshape(self.wide_w, (4, 1024))
             out1 = self.reduce_mean(out1, 1)
             out1 = out1 - w
             out2 = self.mul(y, w)
@@ -255,7 +262,7 @@ def test_reshape_auto_6():
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
     x = Tensor(np.ones([4, 1024, 1]), dtype=ms.float32)
-    y = Tensor(np.ones([4, 1024,]), dtype=ms.float32)
+    y = Tensor(np.ones([4, 1024, ]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
diff --git a/tests/ut/python/parallel/test_auto_parallel_rhombus.py b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
index 1d66aa3b5d..83006a12e8 100644
--- a/tests/ut/python/parallel/test_auto_parallel_rhombus.py
+++ b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -71,6 +73,7 @@ def test_rhombus1():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     compile(net, x, y, b)
 
+
 def test_rhombus2():
     class Net(nn.Cell):
         def __init__(self):
@@ -101,6 +104,7 @@ def test_rhombus2():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     compile(net, x, y, b)
 
+
 def test_rhombus3():
     class Net(nn.Cell):
         def __init__(self):
diff --git a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
index 712e4c19f7..fa8dbc91d0 100644
--- a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
+++ b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
diff --git a/tests/ut/python/parallel/test_auto_parallel_transformer.py b/tests/ut/python/parallel/test_auto_parallel_transformer.py
index 88cd322c77..3f47612681 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transformer.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transformer.py
@@ -21,6 +21,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -31,6 +32,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -39,6 +41,7 @@ class GradWrap(nn.Cell):
     def construct(self, x):
         return C.grad_all(self.network)(x)
 
+
 class CustomDense(nn.Cell):
     def __init__(self, row, column):
         super(CustomDense, self).__init__()
@@ -79,6 +82,7 @@ class DenseMutMulNet(nn.Cell):
         s = self.fc4(s)
         return s
 
+
 class MultiTransformer(nn.Cell):
     def __init__(self, layer_nums=1):
         super(MultiTransformer, self).__init__()
@@ -95,10 +99,11 @@ class MultiTransformer(nn.Cell):
         out = self.layer(x)
         return out
 
+
 def test_dmnet_train_step():
     size = 8
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-
+
     input = Tensor(np.ones([4096, 4096]).astype(np.float32) * 0.01)
     net = GradWrap(NetWithLoss(MultiTransformer()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py
index 821546330c..357c88cec3 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transpose.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py
@@ -34,6 +34,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -42,6 +43,7 @@ class GradWrap(nn.Cell):
     def construct(self, x, y, b):
         return C.grad_all(self.network)(x, y, b)
 
+
 # core dump, step_auto_parallel should SetInputs for transpose axis
 def test_two_matmul_transpose():
     class Net(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
index fba9f7855a..2ef21c8904 100644
--- a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
+++ b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
@@ -43,10 +43,12 @@ class GradWrap(nn.Cell):
     def construct(self, x, y, b):
         return C.grad_all(self.network)(x, y, b)
 
+
 def bn_with_initialize(out_channels):
     bn = nn.BatchNorm2d(out_channels, momentum=0.1, eps=1e-5)
     return bn
 
+
 # model_parallel test
 def test_virtual_dataset_3_input():
     class Net(nn.Cell):
@@ -65,7 +67,6 @@ def test_virtual_dataset_3_input():
             out = self.matmul2(out, b)
             return out
 
-
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     context.set_auto_parallel_context(device_num=8, global_rank=0)
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_bn.py b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
index 41219a1e42..6cd2cbfdc4 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_bn.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
@@ -10,6 +10,7 @@ from mindspore.parallel import set_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 import re
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -20,6 +21,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x)
         return self.loss(predict)
 
+
 class Blockcell(nn.Cell):
     def __init__(self):
         super(Blockcell, self).__init__()
@@ -29,9 +31,11 @@ class Blockcell(nn.Cell):
         out = self.bn(x)
         return out
 
+
 def getBlock():
     return Blockcell()
 
+
 def test_two_bn():
     class Net(nn.Cell):
         def __init__(self):
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
index 443567da58..05159d74a7 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
@@ -25,6 +25,7 @@ from mindspore.parallel import _cost_model_context as cost_model_context
 from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -35,6 +36,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, b)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -44,6 +46,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, b)
 
 # model_parallel test
+
+
 def test_two_matmul():
     class Net(nn.Cell):
         def __init__(self):
@@ -58,7 +62,7 @@ def test_two_matmul():
 
     size = 16
     context.set_auto_parallel_context(device_num=size, global_rank=0)
-    cost_model_context.set_cost_model_context(device_memory_capacity= 32.0 * 1024.0 * 1024.0 * 1024.0,
+    cost_model_context.set_cost_model_context(device_memory_capacity=32.0 * 1024.0 * 1024.0 * 1024.0,
                                               costmodel_alpha=1.0,
                                               costmodel_beta=60.0,
                                               costmodel_gamma=0.1,
@@ -96,7 +100,6 @@ def test_two_matmul():
     costmodel_communi_bias = cost_model_context.get_cost_model_context("costmodel_communi_bias")
     assert costmodel_communi_bias == 1024.0
 
-
     set_algo_parameters(tensor_slice_align_enable=False, tensor_slice_align_size=32,
                         fully_use_devices=False, elementwise_op_strategy_follow=False)
     para_slice_align_enable = get_algo_parameters("tensor_slice_align_enable")
@@ -126,9 +129,9 @@ def test_two_matmul():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     reset_op_id()
-
+
     _executor.compile(net, x, y, b, phase='train')
     strategies = _executor._get_strategy(net)
     expected_strategies = {'Default/network-Net/MatMul-op0': [[16, 1], [1, 1]],
-                          'Default/network-Net/MatMul-op1': [[16, 1], [1, 1]]}
+                           'Default/network-Net/MatMul-op1': [[16, 1], [1, 1]]}
     assert strategies == expected_strategies
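The capacity passed to set_cost_model_context in the hunk above is plain bytes; a quick standalone check of the arithmetic (plain Python, nothing MindSpore-specific):

# 32 GiB in bytes, exactly as written in the hunk above.
device_memory_capacity = 32.0 * 1024.0 * 1024.0 * 1024.0
assert device_memory_capacity == 34359738368.0  # 32 * 2**30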
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
index 8e6b006db8..894a1f0df8 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -41,6 +43,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y)
 
 # model_parallel test
+
+
 def test_four_matmul_linear():
     class Net(nn.Cell):
         def __init__(self, strategy1):
@@ -63,4 +67,4 @@ def test_four_matmul_linear():
     net = GradWrap(NetWithLoss(Net(strategy1)))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
-    _executor.compile(net, x, y)
\ No newline at end of file
+    _executor.compile(net, x, y)
diff --git a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
index 5184d308ee..3453a12b3f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
+++ b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
@@ -22,6 +22,7 @@ import mindspore as ms
 from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -32,6 +33,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y, z, w, a)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -41,6 +43,8 @@ class GradWrap(nn.Cell):
         return C.grad_all(self.network)(x, y, z, w, a)
 
 # model_parallel test
+
+
 def test_zig_zag_graph():
     class Net(nn.Cell):
         def __init__(self):
diff --git a/tests/ut/python/parallel/test_auto_star_elimination.py b/tests/ut/python/parallel/test_auto_star_elimination.py
index 100f24dda3..2bf52e3308 100644
--- a/tests/ut/python/parallel/test_auto_star_elimination.py
+++ b/tests/ut/python/parallel/test_auto_star_elimination.py
@@ -27,6 +27,7 @@ from mindspore import context
 from mindspore.common.api import _executor
 import mindspore as ms
 
+
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
@@ -37,6 +38,7 @@ class NetWithLoss(nn.Cell):
         predict = self.network(x, y)
         return self.loss(predict)
 
+
 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
@@ -79,11 +81,11 @@ class MarginCE(_Loss):
 
 def test_marin_loss():
     context.set_auto_parallel_context(device_num=4, global_rank=0)
-
+
     x = Tensor(np.ones([512, 512]), dtype=ms.float32)
     y = Tensor(np.ones([512, 512]), dtype=ms.float32)
 
     net = GradWrap(NetWithLoss(MarginCE()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
-    _executor.compile(net, x, y)
\ No newline at end of file
+    _executor.compile(net, x, y)
diff --git a/tests/ut/python/parallel/test_batch_matmul.py b/tests/ut/python/parallel/test_batch_matmul.py
index e230105ed9..82b9a1b6f1 100644
--- a/tests/ut/python/parallel/test_batch_matmul.py
+++ b/tests/ut/python/parallel/test_batch_matmul.py
@@ -44,7 +44,7 @@ def compile(net):
     optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
-    _executor.compile(train_net, _x, _b)
+    _executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_batch_parallel.py b/tests/ut/python/parallel/test_batch_parallel.py
index 9e09d66f45..e01afe14ab 100644
--- a/tests/ut/python/parallel/test_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batch_parallel.py
@@ -23,7 +23,6 @@ from mindspore.common.api import _executor
 from mindspore.ops import composite as C
 
 
-
 class NetWithLoss(nn.Cell):
     def __init__(self, network):
         super(NetWithLoss, self).__init__()
diff --git a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
index 9918d72bec..13bbdd69d8 100644
--- a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
+++ b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
@@ -42,6 +42,7 @@ class GradWrap(nn.Cell):
     def construct(self, x, y, b):
         return C.grad_all(self.network)(x, y, b)
 
+
 def test_matmul_add():
     class Net(nn.Cell):
         def __init__(self):
@@ -55,7 +56,7 @@ def test_matmul_add():
             return out
 
     context.set_auto_parallel_context(device_num=8, global_rank=0)
-
+
     net = GradWrap(NetWithLoss(Net()))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     net.set_auto_parallel()
diff --git a/tests/ut/python/parallel/test_batchnorm_batch_parallel.py b/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
index 95a7710792..78887818f1 100644
--- a/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
@@ -28,12 +28,11 @@ from mindspore.ops import operations as P
 from mindspore.common.parameter import Parameter
 from tests.dataset_mock import MindData
 
-
 dev_num = 8
-strategy_no_weight = ((dev_num, 1, 1, 1), )
+strategy_no_weight = ((dev_num, 1, 1, 1),)
 strategy_weight = ((dev_num, 1, 1, 1), (1, 1, 1, 1))
-strategy_bn = ((dev_num, 1, 1, 1), (1, ), (1, ))
-strategy_fc_weight_bias = ((dev_num, 1), (1, 1), (1, ))
+strategy_bn = ((dev_num, 1, 1, 1), (1,), (1,))
+strategy_fc_weight_bias = ((dev_num, 1), (1, 1), (1,))
 
 
 class DatasetLenet(MindData):
@@ -94,7 +93,7 @@ class ResNet(Cell):
 
     def __init__(self, num_classes=100):
         super(ResNet, self).__init__()
-        strategy_no_weight = ((dev_num, 1, 1, 1), )
+        strategy_no_weight = ((dev_num, 1, 1, 1),)
         self.conv1 = conv7x7(3, 64, stride=2, padding=3)
         self.bn1 = bn_with_initialize(64)
         self.relu = ReLU()
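Most strategy edits in this patch, like the ones just above, only normalize tuple spelling; the values are untouched. A standalone check (plain Python, no MindSpore needed) that both spellings build the same one-element tuple of layouts:

dev_num = 8

old_style = ((dev_num, 1, 1, 1), )  # spelling before the cleanup
new_style = ((dev_num, 1, 1, 1),)   # spelling after the cleanup

assert old_style == new_style        # whitespace only; identical structure
assert len(new_style) == 1           # one operator input -> one layout tuple
assert new_style[0] == (8, 1, 1, 1)  # first axis sliced across dev_num devices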
diff --git a/tests/ut/python/parallel/test_bn_prelu_cell.py b/tests/ut/python/parallel/test_bn_prelu_cell.py
index 5971db7027..78cd8b20bf 100644
--- a/tests/ut/python/parallel/test_bn_prelu_cell.py
+++ b/tests/ut/python/parallel/test_bn_prelu_cell.py
@@ -53,6 +53,7 @@ class Dataset(MindData):
 
 class FusedBatchNorm(nn.Cell):
     """Batch Normalization base class."""
+
     def __init__(self,
                  num_features,
                  eps=1e-5,
@@ -87,9 +88,9 @@ class FusedBatchNorm(nn.Cell):
                                           epsilon=self.eps)
         self.sub_mean = P.Sub().set_strategy(((1), (1)))
         self.sub_var = P.Sub().set_strategy(((1), (1)))
-        self.mul_mean = P.Mul().set_strategy(((1, ), ()))
-        self.mul_var = P.Mul().set_strategy(((1, ), ()))
-        self.assign_sub_mean = P.AssignSub().set_strategy(((1, ), (1,)))
+        self.mul_mean = P.Mul().set_strategy(((1,), ()))
+        self.mul_var = P.Mul().set_strategy(((1,), ()))
+        self.assign_sub_mean = P.AssignSub().set_strategy(((1,), (1,)))
         self.assign_sub_var = P.AssignSub().set_strategy(((1), (1)))
         self.sub_mean2 = P.Sub().set_strategy(((1), (1)))
         self.sub_var2 = P.Sub().set_strategy(((1), (1)))
@@ -138,7 +139,6 @@ class FusedBatchNorm(nn.Cell):
                                  self.moving_variance)
 
 
-
 class PReLU(nn.Cell):
     """
     PReLU activation function.
@@ -158,6 +158,7 @@ class PReLU(nn.Cell):
         input_data = Tensor(np.random.rand(1, 33, 4, 4), ms.float32)
         output = prelu.construct(input_data)
     """
+
     def __init__(self, channel=1, w=0.25):
         super(PReLU, self).__init__()
         if isinstance(w, (np.float32, float)):
@@ -169,7 +170,7 @@ class PReLU(nn.Cell):
         if not isinstance(w, Tensor):
             w = Tensor(w)
 
-        self.w = Parameter(initializer(w, [channel,]), name='a')
+        self.w = Parameter(initializer(w, [channel, ]), name='a')
         self.prelu = P.PReLU()
         self.relu = P.ReLU().set_strategy(((1)))
@@ -184,7 +185,6 @@ class BNNet(nn.Cell):
         self.bn = FusedBatchNorm(512)
         self.prelu = PReLU(512)
 
-
     def construct(self, x):
         x = self.bn(x)
         x = self.prelu(x)
diff --git a/tests/ut/python/parallel/test_bool_grad.py b/tests/ut/python/parallel/test_bool_grad.py
index 491707103b..2334ebc430 100644
--- a/tests/ut/python/parallel/test_bool_grad.py
+++ b/tests/ut/python/parallel/test_bool_grad.py
@@ -23,7 +23,6 @@ from mindspore import context
 from mindspore.train import Model, ParallelMode
 from mindspore.nn.optim import Momentum
 
-
 context.set_context(mode=context.GRAPH_MODE)
 
 
@@ -52,8 +51,8 @@ class CommonNet(nn.Cell):
     def __init__(self):
         super(CommonNet, self).__init__()
         self.weight = Parameter(Tensor(np.ones([256, 64]), dtype=ms.float32), name="mul_weight")
-        self.logicalnot = P.LogicalNot().set_strategy(((4,2),))
-        self.equal = P.Equal().set_strategy(((4,2),(4,2)))
+        self.logicalnot = P.LogicalNot().set_strategy(((4, 2),))
+        self.equal = P.Equal().set_strategy(((4, 2), (4, 2)))
 
     def construct(self, x, label):
         x = self.equal(x, self.weight)
@@ -65,7 +64,7 @@ def common_net():
     epoch_size = 1
 
     context.reset_auto_parallel_context()
-
+
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8)
     predict = Tensor(np.ones([32, 64]), dtype=ms.float32)
     label = Tensor(np.ones([32]), dtype=ms.int32)
@@ -79,4 +78,3 @@ def common_net():
 
 def test_bool_grad():
     common_net()
-
diff --git a/tests/ut/python/parallel/test_combined_tensor.py b/tests/ut/python/parallel/test_combined_tensor.py
index ac70076c67..046c67db00 100644
--- a/tests/ut/python/parallel/test_combined_tensor.py
+++ b/tests/ut/python/parallel/test_combined_tensor.py
@@ -20,27 +20,27 @@ def test_reshape_param_data():
     expected_tensor = Tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
     dev_mat = [2, 2]
     tensor_map = [0, 1]
-    input_tensor = Tensor([[1, 2],[5, 6],[3, 4],[7, 8]])
+    input_tensor = Tensor([[1, 2], [5, 6], [3, 4], [7, 8]])
     tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
     if expected_tensor.__str__() != tensor.__str__():
         raise AssertionError
 
     tensor_map = [1, -1]
-    input_tensor = Tensor([[1, 2, 3, 4],[1, 2, 3, 4],[5, 6, 7, 8],[5, 6, 7, 8]])
+    input_tensor = Tensor([[1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [5, 6, 7, 8]])
     tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
     if expected_tensor.__str__() != tensor.__str__():
         raise AssertionError
 
-    expected_tensor = Tensor([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
+    expected_tensor = Tensor([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
                               [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]])
-    input_tensor = Tensor([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
-                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\
+    input_tensor = Tensor([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
+                           [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], \
                            [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]])
 
     dev_mat = [4]
@@ -49,7 +49,6 @@ def test_reshape_param_data():
     if expected_tensor.__str__() != tensor.__str__():
         raise AssertionError
 
-
 if __name__ == '__main__':
     test_reshape_param_data()
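A NumPy sketch of how the first expected tensor in the hunk above can be reassembled from the stacked per-device slices. This is an illustration of the dev_mat/tensor_map layout implied by the test data, not the actual _reshape_param_data implementation; the rank ordering below is an assumption that reproduces the expected result:

import numpy as np

# Test input: one 1x2 slice per device, stacked in rank order.
slices = np.array([[1, 2], [5, 6], [3, 4], [7, 8]])

# dev_mat = [2, 2]: four devices in a 2x2 grid. With tensor_map = [0, 1],
# rank = col_block * 2 + row_block reproduces the expected placement.
full = np.zeros((2, 4), dtype=slices.dtype)
for rank in range(4):
    col_block, row_block = divmod(rank, 2)
    full[row_block, col_block * 2:(col_block + 1) * 2] = slices[rank]

assert (full == np.array([[1, 2, 3, 4], [5, 6, 7, 8]])).all()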
diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py
index 6f5ea4e26a..5194152009 100644
--- a/tests/ut/python/parallel/test_comparison_function_info.py
+++ b/tests/ut/python/parallel/test_comparison_function_info.py
@@ -154,7 +154,7 @@ def test_matmul_maximum_broadcast():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), (2, ))
+    strategy2 = ((4, 2), (2,))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
 
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
@@ -223,7 +223,7 @@ def test_matmul_minimum_broadcast():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), (2, ))
+    strategy2 = ((4, 2), (2,))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
 
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
diff --git a/tests/ut/python/parallel/test_dataset.py b/tests/ut/python/parallel/test_dataset.py
index f4bfb41869..7e35207ac8 100644
--- a/tests/ut/python/parallel/test_dataset.py
+++ b/tests/ut/python/parallel/test_dataset.py
@@ -41,4 +41,3 @@ def test_virtual_dataset():
     z = Tensor(np.ones([64, 64], dtype=np.float32))
     network = VirtualDatasetNet()
     _executor.compile(network, x, y, z)
-
diff --git a/tests/ut/python/parallel/test_dataset_interface.py b/tests/ut/python/parallel/test_dataset_interface.py
index 87cd9cac00..ebfae6939b 100644
--- a/tests/ut/python/parallel/test_dataset_interface.py
+++ b/tests/ut/python/parallel/test_dataset_interface.py
@@ -25,7 +25,6 @@ from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from mindspore.ops import composite as C, functional as F, operations as P
 from mindspore.common.parameter import Parameter, ParameterTuple
 
-
 context.set_context(mode=context.GRAPH_MODE)
 
 
@@ -95,7 +94,7 @@ def loss_scale_manager_common(strategy1):
 
 def fixme_test_dataset_interface_sens_scalar():
     # With error: "The type of sens node is not Tensor or Parameter, it is unsupported now."
-    strategy1 = ((8, 1), )
+    strategy1 = ((8, 1),)
     loss_scale_manager_common(strategy1)
 
@@ -131,7 +130,7 @@ def loss_scale_manager_sens(strategy1, sens):
 
 def test_dataset_interface_sens_shape_not_equal_loss():
-    strategy1 = ((8, 1), )
+    strategy1 = ((8, 1),)
     sens = Tensor(np.ones([256, 1024]), dtype=ms.float32)
     try:
         loss_scale_manager_sens(strategy1, sens)
@@ -140,7 +139,7 @@ def test_dataset_interface_sens_shape_not_equal_loss():
 
 def test_dataset_interface_sens_shape_equal_loss():
-    strategy1 = ((4, 2), )
+    strategy1 = ((4, 2),)
     sens = Tensor(np.ones([256, 256]), dtype=ms.float32)
     loss_scale_manager_sens(strategy1, sens)
 
@@ -158,7 +157,7 @@ def test_input_not_in_parameter_layotu_dict():
             x = self.transpose1(x, (1, 0))
             return x
 
-    strategy1 = ((8, 1), )
+    strategy1 = ((8, 1),)
     device_num = 8
     context.reset_auto_parallel_context()
     context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_num)
@@ -167,6 +166,3 @@ def test_input_not_in_parameter_layotu_dict():
     net = Net(strategy1)
     net.set_train()
     net(predict, b)
-
-
-
diff --git a/tests/ut/python/parallel/test_dataset_util.py b/tests/ut/python/parallel/test_dataset_util.py
index c79932a898..21d90c7842 100644
--- a/tests/ut/python/parallel/test_dataset_util.py
+++ b/tests/ut/python/parallel/test_dataset_util.py
@@ -17,6 +17,7 @@ from mindspore.train._utils import _to_full_shapes, _to_full_tensor
 from mindspore import Tensor
 import mindspore as ms
 
+
 def test_to_full_shapes():
     device_num = 16
     shapes = [[32, 128], [12], [24, 1, 12]]
@@ -25,26 +26,26 @@ def test_to_full_shapes():
 
 
 def test_to_full_tensor_1():
-    elem = Tensor([[1,2,3], [4,5,6]], dtype=ms.float32)
+    elem = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
     device_num = 4
     global_rank = 2
     full_tensor = _to_full_tensor(elem, device_num, global_rank, scaling_sens=None)
 
-    expect = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1,2,3], [4,5,6], [0, 0, 0], [0, 0, 0]])
+    expect = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0]])
     expect_tensor = Tensor(expect, dtype=ms.float32)
 
     assert (full_tensor[0] == expect_tensor)
 
 
 def test_to_full_tensor_2():
-    elem0 = Tensor([[1,2,3], [4,5,6]], dtype=ms.float32)
+    elem0 = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
     elem1 = Tensor([[1], [4]], dtype=ms.int32)
     elem = (elem0, elem1,)
     device_num = 4
     global_rank = 2
     full_tensor = _to_full_tensor(elem, device_num, global_rank, scaling_sens=None)
 
-    expect0 = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1,2,3], [4,5,6], [0, 0, 0], [0, 0, 0]])
+    expect0 = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0]])
     expect_tensor0 = Tensor(expect0, dtype=ms.float32)
     expect1 = ([[0], [0], [0], [0], [1], [4], [0], [0]])
     expect_tensor1 = Tensor(expect1, dtype=ms.int32)
@@ -54,14 +55,14 @@ def test_to_full_tensor_2():
 
 
 def test_to_full_tensor_sens_2():
-    elem0 = Tensor([[1,2,3], [4,5,6]], dtype=ms.float32)
+    elem0 = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
     elem1 = Tensor([[1], [4]], dtype=ms.int32)
     elem = (elem0, elem1,)
     device_num = 4
     global_rank = 2
     full_tensor = _to_full_tensor(elem, device_num, global_rank, scaling_sens=0.1)
 
-    expect0 = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1,2,3], [4,5,6], [0, 0, 0], [0, 0, 0]])
+    expect0 = ([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0]])
     expect_tensor0 = Tensor(expect0, dtype=ms.float32)
     expect1 = ([[0], [0], [0], [0], [1], [4], [0], [0]])
     expect_tensor1 = Tensor(expect1, dtype=ms.int32)
@@ -69,8 +70,3 @@ def test_to_full_tensor_sens_2():
     expect_tensors = (expect_tensor0, expect_tensor1, expect_tensor_sens)
 
     assert (full_tensor == expect_tensors)
-
-
-
-
-
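The expected tensors in the dataset_util hunks above encode a simple padding rule: the full batch has device_num * batch rows, and only the slice owned by global_rank is non-zero. A NumPy sketch of that behaviour (an illustration only, not MindSpore's _to_full_tensor code):

import numpy as np


def to_full_tensor_sketch(elem, device_num, global_rank):
    # Place this rank's slice into an otherwise zero full-batch tensor.
    batch = elem.shape[0]
    full = np.zeros((device_num * batch,) + elem.shape[1:], dtype=elem.dtype)
    full[global_rank * batch:(global_rank + 1) * batch] = elem
    return full


elem = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
full = to_full_tensor_sketch(elem, device_num=4, global_rank=2)

# Rows 4 and 5 hold the local slice, matching expect0 in the tests above.
assert (full[4:6] == elem).all()
assert full[:4].sum() == 0 and full[6:].sum() == 0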
diff --git a/tests/ut/python/parallel/test_different_type_for_div_op.py b/tests/ut/python/parallel/test_different_type_for_div_op.py
index 2268437c82..14ccee61a2 100644
--- a/tests/ut/python/parallel/test_different_type_for_div_op.py
+++ b/tests/ut/python/parallel/test_different_type_for_div_op.py
@@ -45,12 +45,12 @@ def test_sum_as_loss_float16():
 
         def construct(self, x, y, bias):
             out = self.fc_nobias(x, y)
-            out = self.reduce_sum(out, (0,1))
+            out = self.reduce_sum(out, (0, 1))
             return out
 
     context.set_auto_parallel_context(device_num=16, global_rank=0)
     strategy0 = ((4, 1), (4, 1))
-    strategy1 = ((4, 1), )
+    strategy1 = ((4, 1),)
     net = GradWrap(Net(strategy0, strategy1))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -69,12 +69,12 @@ def test_sum_as_loss_float32():
 
         def construct(self, x, y, bias):
             out = self.fc_nobias(x, y)
-            out = self.reduce_sum(out, (0,1))
+            out = self.reduce_sum(out, (0, 1))
             return out
 
     context.set_auto_parallel_context(device_num=16, global_rank=0)
     strategy0 = ((4, 1), (4, 1))
-    strategy1 = ((4, 1), )
+    strategy1 = ((4, 1),)
     net = GradWrap(Net(strategy0, strategy1))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -93,12 +93,12 @@ def test_sum_as_loss_int32():
 
         def construct(self, x, y, bias):
             out = self.fc_nobias(x, y)
-            out = self.reduce_sum(out, (0,1))
+            out = self.reduce_sum(out, (0, 1))
             return out
 
     context.set_auto_parallel_context(device_num=16, global_rank=0)
     strategy0 = ((4, 1), (4, 1))
-    strategy1 = ((4, 1), )
+    strategy1 = ((4, 1),)
     net = GradWrap(Net(strategy0, strategy1))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py
index 7cf2253184..aefd980b0d 100644
--- a/tests/ut/python/parallel/test_element_wise_function.py
+++ b/tests/ut/python/parallel/test_element_wise_function.py
@@ -90,7 +90,7 @@ def test_matmul_exp():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), )
+    strategy2 = ((4, 2),)
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -116,7 +116,7 @@ def test_matmul_log():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), )
+    strategy2 = ((4, 2),)
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -142,7 +142,7 @@ def test_matmul_logical_not():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), )
+    strategy2 = ((4, 2),)
     strategy3 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -152,6 +152,7 @@ def test_matmul_logical_not():
     b = Tensor(np.ones([128, 64]), dtype=ms.float32)
     compile(net, x, y, b)
 
+
 def test_matmul_cast():
     class Net(nn.Cell):
         def __init__(self, strategy1, strategy2, strategy3):
@@ -168,7 +169,7 @@ def test_matmul_cast():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), )
+    strategy2 = ((4, 2),)
     strategy3 = ((1, 4), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
@@ -196,7 +197,7 @@ def test_cast_before_mirror():
     strategy1 = ((2, 2), (2, 2))
     net = GradWrap(NetWithLoss(Net(strategy1)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-
+
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64, 64]), dtype=ms.float16)
@@ -220,7 +221,7 @@ def test_cast_before_mirror1():
     strategy1 = ((2, 2), (2, 2))
     net = GradWrap(NetWithLoss(Net(strategy1)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-
+
     x = Tensor(np.ones([128, 32]), dtype=ms.float16)
     y = Tensor(np.ones([32, 64]), dtype=ms.float16)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
@@ -244,7 +245,7 @@ def test_cast_before_mirror2():
     strategy1 = ((2, 2), (2, 2))
     net = GradWrap(NetWithLoss(Net(strategy1)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-
+
     x = Tensor(np.ones([128, 32]), dtype=ms.float16)
     y = Tensor(np.ones([32, 64]), dtype=ms.float16)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
@@ -268,7 +269,7 @@ def test_cast_before_mirror3():
     strategy1 = ((2, 2), (2, 2))
     net = GradWrap(NetWithLoss(Net(strategy1)))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-
+
     x = Tensor(np.ones([128, 32]), dtype=ms.float16)
     y = Tensor(np.ones([32, 64]), dtype=ms.float16)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
@@ -294,7 +295,7 @@ def test_mul_two_cast():
     context.set_auto_parallel_context(device_num=8, global_rank=0)
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((8, 1), (8, 1))
-    strategy3 = ((8, 1), )
+    strategy3 = ((8, 1),)
     net = GradWrap(Net(strategy1, strategy2, strategy3))
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
diff --git a/tests/ut/python/parallel/test_expand_dims.py b/tests/ut/python/parallel/test_expand_dims.py
index b35c36b7fa..6048eea731 100644
--- a/tests/ut/python/parallel/test_expand_dims.py
+++ b/tests/ut/python/parallel/test_expand_dims.py
@@ -57,14 +57,14 @@ def compile(net):
     optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
-    _executor.compile(train_net, _x, _b)
+    _executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
 
 
 def test_expand_dims_data_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((16, 1, 1), (16, 1, 1))
-    strategy2 = ((16, 1, 1), )
+    strategy2 = ((16, 1, 1),)
     strategy3 = ((16, 1, 1, 1), (16, 1, 1, 1))
     net = Net(_w1, strategy1, strategy2, strategy3)
     compile(net)
@@ -73,7 +73,7 @@ def test_expand_dims_model_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((1, 1, 16), (1, 1, 16))
-    strategy2 = ((1, 1, 16), )
+    strategy2 = ((1, 1, 16),)
     strategy3 = ((1, 1, 16, 1), (1, 1, 16, 1))
     net = Net(_w1, strategy1, strategy2, strategy3)
     compile(net)
@@ -82,7 +82,7 @@ def test_expand_dims_hybrid_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((2, 2, 4), (2, 2, 4))
-    strategy2 = ((2, 2, 4), )
+    strategy2 = ((2, 2, 4),)
     strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1))
     net = Net(_w1, strategy1, strategy2, strategy3)
     compile(net)
@@ -97,7 +97,7 @@ def test_expand_dims_auto_parallel():
 
 def test_expand_dims_repeat_calc():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((2, 2, 4), (2, 2, 4))
-    strategy2 = ((1, 2, 2), )
+    strategy2 = ((1, 2, 2),)
     strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1))
     net = Net(_w1, strategy1, strategy2, strategy3)
     compile(net)
@@ -105,7 +105,7 @@ def test_expand_dims_parameter():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
-    strategy1 = ((1, 2, 2), )
+    strategy1 = ((1, 2, 2),)
     strategy2 = ((2, 2, 4, 1), (2, 2, 4, 1))
     net = Net2(_w1, strategy1, strategy2)
     compile(net)
diff --git a/tests/ut/python/parallel/test_forward_graph.py b/tests/ut/python/parallel/test_forward_graph.py
index 944cb5532c..577ee51a9b 100644
--- a/tests/ut/python/parallel/test_forward_graph.py
+++ b/tests/ut/python/parallel/test_forward_graph.py
@@ -40,14 +40,14 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
 
 def compile(net):
     net.set_auto_parallel()
-    _executor.compile(net, _x, _b)
+    _executor.compile(net, _x, _b)
     context.reset_auto_parallel_context()
 
 
 def test_forward_graph_data_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((16, 1, 1), (16, 1, 1))
-    strategy2 = ((16, 1, 1), )
+    strategy2 = ((16, 1, 1),)
     net = Net(_w1, strategy1, strategy2)
     compile(net)
@@ -55,7 +55,7 @@ def test_forward_graph_model_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((1, 1, 16), (1, 1, 16))
-    strategy2 = ((1, 1, 16), )
+    strategy2 = ((1, 1, 16),)
     net = Net(_w1, strategy1, strategy2)
     compile(net)
@@ -63,7 +63,7 @@ def test_forward_graph_hybrid_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((2, 2, 4), (2, 2, 4))
-    strategy2 = ((2, 2, 4), )
+    strategy2 = ((2, 2, 4),)
     net = Net(_w1, strategy1, strategy2)
     compile(net)
@@ -77,7 +77,6 @@ def test_forward_graph_auto_parallel():
 
 def test_forward_graph_repeat_calc():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((2, 2, 4), (2, 2, 4))
-    strategy2 = ((1, 2, 2), )
+    strategy2 = ((1, 2, 2),)
     net = Net(_w1, strategy1, strategy2)
     compile(net)
-
diff --git a/tests/ut/python/parallel/test_gather_v2.py b/tests/ut/python/parallel/test_gather_v2.py
index 793f3b91c4..7b3a91b64e 100644
--- a/tests/ut/python/parallel/test_gather_v2.py
+++ b/tests/ut/python/parallel/test_gather_v2.py
@@ -60,7 +60,7 @@ class Net(nn.Cell):
 
 def test_gatherv2_semi_auto0():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((1, 8), )
+    strategy1 = ((1, 8),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -69,9 +69,10 @@ def test_gatherv2_semi_auto0():
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto1():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((8, 1), )
+    strategy1 = ((8, 1),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -80,9 +81,10 @@ def test_gatherv2_semi_auto1():
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto2():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((2, 4), )
+    strategy1 = ((2, 4),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -91,9 +93,10 @@ def test_gatherv2_semi_auto2():
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto3():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((1, 8), )
+    strategy1 = ((1, 8),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -102,9 +105,10 @@ def test_gatherv2_semi_auto3():
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto4():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((8, 1), )
+    strategy1 = ((8, 1),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -113,9 +117,10 @@ def test_gatherv2_semi_auto4():
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto5():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((2, 4), )
+    strategy1 = ((2, 4),)
     strategy2 = ((4, 2, 1), (4, 2, 1))
     net = GradWrap(NetWithLoss(Net(1, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -124,6 +129,7 @@ def test_gatherv2_semi_auto5():
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto6():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy2 = ((4, 2, 1), (4, 2, 1))
@@ -134,6 +140,7 @@ def test_gatherv2_semi_auto6():
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto7():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy2 = ((4, 2, 1), (4, 2, 1))
@@ -144,9 +151,10 @@ def test_gatherv2_semi_auto7():
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_semi_auto8():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
-    strategy1 = ((8, ), )
+    strategy1 = ((8,),)
     strategy2 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(0, strategy1, strategy2)))
     net.set_auto_parallel()
@@ -155,6 +163,7 @@ def test_gatherv2_semi_auto8():
     y = Tensor(np.ones([64, 64]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_auto0():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
     net = GradWrap(NetWithLoss(Net(0)))
@@ -163,6 +172,7 @@ def test_gatherv2_auto0():
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     _executor.compile(net, x, y)
 
+
 def test_gatherv2_auto1():
     context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
     net = GradWrap(NetWithLoss(Net(1)))
diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py
index 3812ccd819..2174efc735 100644
--- a/tests/ut/python/parallel/test_gather_v2_primitive.py
+++ b/tests/ut/python/parallel/test_gather_v2_primitive.py
@@ -69,9 +69,9 @@ class GatherV2(_Loss):
         emb1_list = emb_list[0::2]
         emb2_list = emb_list[1::2]
         if index_dim == 2:
-            emb_list = np.arange(index_size*16)
-            emb1_list = np.reshape(emb_list[0::2], (int(index_size/2), 16))
-            emb2_list = np.reshape(emb_list[1::2], (int(index_size/2), 16))
+            emb_list = np.arange(index_size * 16)
+            emb1_list = np.reshape(emb_list[0::2], (int(index_size / 2), 16))
+            emb2_list = np.reshape(emb_list[1::2], (int(index_size / 2), 16))
         self.emb1_param = Tensor(emb1_list, dtype=mstype.int32)
         self.emb2_param = Tensor(emb2_list, dtype=mstype.int32)
         self.gatherv2 = P.GatherV2().set_strategy(strategy).add_prim_attr("data_parallel", True)
@@ -199,9 +199,9 @@ class GatherV2Axis1(_Loss):
         emb1_list = emb_list[0::2]
         emb2_list = emb_list[1::2]
         if index_dim == 2:
-            emb_list = np.arange(index_size*index_size)
-            emb1_list = np.reshape(emb_list[0::2], (int(index_size/2), index_size))
-            emb2_list = np.reshape(emb_list[1::2], (int(index_size/2), index_size))
+            emb_list = np.arange(index_size * index_size)
+            emb1_list = np.reshape(emb_list[0::2], (int(index_size / 2), index_size))
+            emb2_list = np.reshape(emb_list[1::2], (int(index_size / 2), index_size))
         self.emb1_param = Tensor(emb1_list, dtype=mstype.int32)
         self.emb2_param = Tensor(emb2_list, dtype=mstype.int32)
         self.gatherv2 = P.GatherV2().set_strategy(strategy)
@@ -231,4 +231,3 @@ def test_axis1_strategy1():
     rank = 17
     criterion = GatherV2Axis1(1, strategy=gather_v2_strategy, index_size=512)
     net_trains(gather_v2_strategy, criterion, rank)
-
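The GatherV2 tests above shard one embedding table into even- and odd-indexed halves. The slicing arithmetic from the hunk, runnable on its own with a small illustrative index_size (the tests themselves use 512):

import numpy as np

index_size = 8  # illustrative value only

# Mirror of the 2-D branch above: even rows to one shard, odd rows to the other.
emb_list = np.arange(index_size * 16)
emb1_list = np.reshape(emb_list[0::2], (index_size // 2, 16))
emb2_list = np.reshape(emb_list[1::2], (index_size // 2, 16))

assert emb1_list.shape == emb2_list.shape == (4, 16)
assert emb1_list[0, 0] == 0 and emb2_list[0, 0] == 1  # values interleave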
NetWithLoss(network, [ms.float32, ms.int32],[[32,64], [32]], 2, strategy3=strategy3, strategy4=strategy4) + network = Net(strategy1=((1, 4),), strategy2=((4, 1), (1,))) + strategy3 = ((4, 1), (), ()) + strategy4 = ((4, 1), (4, 1)) + net_with_loss = NetWithLoss(network, [ms.float32, ms.int32], [[32, 64], [32]], 2, strategy3=strategy3, + strategy4=strategy4) net = GradWrap(net_with_loss) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") compile(net) + def test_get_next_semi_auto_parallel1(): class Net(nn.Cell): def __init__(self, channel=1, w=0.25, strategy1=None, strategy2=None): super().__init__() self.norm = P.L2Normalize().set_strategy(strategy1) self.prelu = P.PReLU().set_strategy(strategy2) - self.w = Parameter(initializer(w, [channel,]), name='w') + self.w = Parameter(initializer(w, [channel, ]), name='w') def construct(self, data): x = self.norm(data) @@ -109,21 +115,23 @@ def test_get_next_semi_auto_parallel1(): return x context.set_auto_parallel_context(device_num=4, global_rank=0) - network = Net(strategy1=((1,4), ), strategy2=((4,1),(1, ))) - strategy3 = ((1, 4),(),()) - strategy4=((4,1), (4,1)) - net_with_loss = NetWithLoss(network, [ms.float32, ms.int32],[[32,64], [32]], 2, strategy3=strategy3, strategy4=strategy4) + network = Net(strategy1=((1, 4),), strategy2=((4, 1), (1,))) + strategy3 = ((1, 4), (), ()) + strategy4 = ((4, 1), (4, 1)) + net_with_loss = NetWithLoss(network, [ms.float32, ms.int32], [[32, 64], [32]], 2, strategy3=strategy3, + strategy4=strategy4) net = GradWrap(net_with_loss) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") compile(net) + def test_get_next_auto_parallel(): class Net(nn.Cell): def __init__(self, channel=1, w=0.25, strategy1=None, strategy2=None): super().__init__() self.norm = P.L2Normalize().set_strategy(strategy1) self.prelu = P.PReLU().set_strategy(strategy2) - self.w = Parameter(initializer(w, [channel,]), name='w') + self.w = Parameter(initializer(w, [channel, ]), name='w') def construct(self, data): x = self.norm(data) @@ -132,7 +140,7 @@ def test_get_next_auto_parallel(): context.set_auto_parallel_context(device_num=4, global_rank=0) network = Net() - net_with_loss = NetWithLoss(network, [ms.float32, ms.int32],[[32,64], [32]], 2) + net_with_loss = NetWithLoss(network, [ms.float32, ms.int32], [[32, 64], [32]], 2) net = GradWrap(net_with_loss) context.set_auto_parallel_context(parallel_mode="auto_parallel") compile(net) @@ -142,12 +150,11 @@ def test_only_one_get_next(): class Net(nn.Cell): def __init__(self): super().__init__() - self.get_next = P.GetNext([ms.float32, ms.int32],[[32,64], [32]], 2, "") + self.get_next = P.GetNext([ms.float32, ms.int32], [[32, 64], [32]], 2, "") def construct(self): return self.get_next() - context.set_auto_parallel_context(device_num=4, global_rank=0) net = Net() context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_get_parameter_layout.py b/tests/ut/python/parallel/test_get_parameter_layout.py index 8588cec21a..6a3b729c1b 100644 --- a/tests/ut/python/parallel/test_get_parameter_layout.py +++ b/tests/ut/python/parallel/test_get_parameter_layout.py @@ -16,7 +16,7 @@ import numpy as np from mindspore import context import mindspore.nn as nn from mindspore.ops import operations as P -from mindspore import Tensor, Parameter +from mindspore import Tensor, Parameter import mindspore as ms import mindspore.common.api as me @@ -38,7 +38,7 @@ def test_get_parameter_layout(): 
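A note on the recurring tuple fix in these hunks: spellings like ((2, 4), ) and ((2, 4),) build the same object, and the trailing comma is what makes the outer value a one-element tuple of per-input layouts. A minimal plain-Python sketch of the pitfall the comma guards against:

    # A sharding strategy is a tuple holding one layout tuple per operator
    # input, so a single-input op still needs the trailing comma outside.
    single_input = ((2, 4),)   # length-1 tuple containing the layout (2, 4)
    bare_parens = ((2, 4))     # Python drops the redundant outer parentheses
    assert len(single_input) == 1 and single_input[0] == (2, 4)
    assert bare_parens == (2, 4)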
context.set_auto_parallel_context(device_num=8, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") strategy1 = ((2, 1), (4, 1)) - strategy2 = ((2, 4), ) + strategy2 = ((2, 4),) context.set_context(mode=context.GRAPH_MODE) x = Tensor(np.ones([32, 32]), dtype=ms.float32) @@ -57,4 +57,3 @@ def test_get_parameter_layout(): if __name__ == '__main__': test_get_parameter_layout() - diff --git a/tests/ut/python/parallel/test_hybird_parallel_activation.py b/tests/ut/python/parallel/test_hybird_parallel_activation.py index 98f5b76b7d..f4c9863d23 100644 --- a/tests/ut/python/parallel/test_hybird_parallel_activation.py +++ b/tests/ut/python/parallel/test_hybird_parallel_activation.py @@ -47,6 +47,7 @@ def compile(net, x, y, b): net.set_auto_parallel() _executor.compile(net, x, y, b) + def test_matmul_tanh(): class Net(nn.Cell): def __init__(self, strategy1, strategy2, strategy3): @@ -62,7 +63,7 @@ def test_matmul_tanh(): strategy1 = ((16, 1), (1, 1)) strategy2 = ((1, 1), (1, 16)) - strategy3 = ((4, 4), ) + strategy3 = ((4, 4),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=16, global_rank=0) @@ -88,7 +89,7 @@ def test_matmul_activation(): strategy1 = ((16, 1), (1, 1)) strategy2 = ((1, 1), (1, 16)) - strategy3 = ((4, 4), ) + strategy3 = ((4, 4),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=16, global_rank=0) @@ -114,7 +115,7 @@ def test_matmul_softmax(): strategy1 = ((16, 1), (1, 1)) strategy2 = ((1, 1), (1, 16)) - strategy3 = ((16, 1), ) + strategy3 = ((16, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=16, global_rank=0) @@ -140,7 +141,7 @@ def test_matmul_logsoftmax(): strategy1 = ((4, 2), (2, 2)) strategy2 = ((2, 4), (4, 2)) - strategy3 = ((16, 1), ) + strategy3 = ((16, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=16, global_rank=0) @@ -169,7 +170,7 @@ def test_activations(): strategy1 = ((1, 2), (2, 2)) strategy2 = ((2, 2), (2, 1)) - strategy3 = ((4, 1), ) + strategy3 = ((4, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=4, global_rank=0) @@ -179,6 +180,7 @@ def test_activations(): b = Tensor(np.ones([64, 64]), dtype=ms.float32) compile(net, x, y, b) + def test_activations_repeated_calculation(): class Net(nn.Cell): def __init__(self, strategy1, strategy2, strategy3, strategy4, strategy5, strategy6): @@ -197,10 +199,10 @@ def test_activations_repeated_calculation(): strategy1 = ((2, 4), (4, 8)) strategy2 = ((2, 2), (2, 1)) - strategy3 = ((2, 1), ) - strategy4 = ((2, 2), ) - strategy5 = ((4, 1), ) - strategy6 = ((8, 1), ) + strategy3 = ((2, 1),) + strategy4 = ((2, 2),) + strategy5 = ((4, 1),) + strategy6 = ((8, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3, strategy4, strategy5, strategy6))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=64, global_rank=0) @@ -229,10 +231,10 @@ def 
test_activations_axis_tuple(): strategy1 = ((2, 4), (4, 8)) strategy2 = ((2, 2), (2, 1)) - strategy3 = ((2, 1), ) - strategy4 = ((2, 2), ) - strategy5 = ((1, 1), ) - strategy6 = ((8, 1), ) + strategy3 = ((2, 1),) + strategy4 = ((2, 2),) + strategy5 = ((1, 1),) + strategy6 = ((8, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3, strategy4, strategy5, strategy6))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=64, global_rank=0) diff --git a/tests/ut/python/parallel/test_l2normalize.py b/tests/ut/python/parallel/test_l2normalize.py index 327abf71eb..e1c0017615 100644 --- a/tests/ut/python/parallel/test_l2normalize.py +++ b/tests/ut/python/parallel/test_l2normalize.py @@ -42,6 +42,7 @@ class GradWrap(nn.Cell): def construct(self, x, y, b): return C.grad_all(self.network)(x, y, b) + # model_parallel test def test_l2normalize_matmul(): class Net(nn.Cell): @@ -60,7 +61,7 @@ def test_l2normalize_matmul(): return out context.set_auto_parallel_context(device_num=8, global_rank=0) - strategy1 = ((1, 1, 4), ) + strategy1 = ((1, 1, 4),) strategy2 = ((1, 1, 4), (1, 1, 4)) strategy3 = ((1, 1, 8), (1, 1, 8)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) diff --git a/tests/ut/python/parallel/test_layer_norm.py b/tests/ut/python/parallel/test_layer_norm.py index 92bb26f346..ece4fa8e30 100644 --- a/tests/ut/python/parallel/test_layer_norm.py +++ b/tests/ut/python/parallel/test_layer_norm.py @@ -51,7 +51,7 @@ def compile(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() - _executor.compile(train_net, _x, _b) + _executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_linear.py b/tests/ut/python/parallel/test_linear.py index 6b40fc8963..2c1e551379 100644 --- a/tests/ut/python/parallel/test_linear.py +++ b/tests/ut/python/parallel/test_linear.py @@ -22,6 +22,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class NetWithLoss(nn.Cell): def __init__(self, network, strategy3): super(NetWithLoss, self).__init__() @@ -41,6 +42,7 @@ class GradWrap(nn.Cell): def construct(self, x, y, bias, label): return C.grad_all(self.network)(x, y, bias, label) + def test_linear(): class Net(nn.Cell): def __init__(self, strategy0, strategy1, strategy2): @@ -57,8 +59,8 @@ def test_linear(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy0 = ((2, 4), (2, 4)) - strategy1 = ((2, 4), (4, )) - strategy2 = ((2, 8), ) + strategy1 = ((2, 4), (4,)) + strategy2 = ((2, 8),) strategy3 = ((16, 1), (16, 1)) net = GradWrap(NetWithLoss(Net(strategy0, strategy1, strategy2), strategy3)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_loop_two_matmul.py b/tests/ut/python/parallel/test_loop_two_matmul.py index 7716d14930..ec3078688e 100644 --- a/tests/ut/python/parallel/test_loop_two_matmul.py +++ b/tests/ut/python/parallel/test_loop_two_matmul.py @@ -93,4 +93,3 @@ def test_two_matmul(): net.set_auto_parallel() _executor.compile(net, x, y, b) count = count + 1 - diff --git a/tests/ut/python/parallel/test_loss_and_optimizer.py b/tests/ut/python/parallel/test_loss_and_optimizer.py index 123184fef5..7ab87a5e83 100644 --- a/tests/ut/python/parallel/test_loss_and_optimizer.py +++ b/tests/ut/python/parallel/test_loss_and_optimizer.py 
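Several removed/added pairs in these hunks look identical, for example the _executor.compile(train_net, _x, _b) lines in test_layer_norm.py: the removed line ends in trailing whitespace (pycodestyle W291/W293), which this rendering cannot show. A small plain-Python checker sketch for such lines:

    # A line whose stripped form differs from its raw form carries trailing
    # whitespace; the cleanup deletes exactly that invisible difference.
    def lines_with_trailing_ws(lines):
        return [n for n, line in enumerate(lines, 1) if line != line.rstrip()]

    sample = ["_executor.compile(train_net, _x, _b)  ",
              "_executor.compile(train_net, _x, _b)"]
    assert lines_with_trailing_ws(sample) == [1]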
@@ -55,7 +55,7 @@ def test_momentum(): context.set_auto_parallel_context(device_num=4, global_rank=0) strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -71,7 +71,7 @@ def test_momentum(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) def test_momentum_with_loss_scale(): @@ -89,7 +89,7 @@ def test_momentum_with_loss_scale(): context.set_auto_parallel_context(device_num=4, global_rank=0) strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -105,7 +105,7 @@ def test_momentum_with_loss_scale(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) def test_momentum_with_dynamic_lr(): @@ -123,7 +123,7 @@ def test_momentum_with_dynamic_lr(): context.set_auto_parallel_context(device_num=4, global_rank=0) strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -140,7 +140,7 @@ def test_momentum_with_dynamic_lr(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) def test_momentum_with_loss_scale_and_dynamic_lr(): @@ -157,9 +157,9 @@ def test_momentum_with_loss_scale_and_dynamic_lr(): return out context.set_auto_parallel_context(device_num=4, global_rank=0) - + strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -176,7 +176,8 @@ def test_momentum_with_loss_scale_and_dynamic_lr(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) + def test_lars(): class Net(nn.Cell): @@ -193,7 +194,7 @@ def test_lars(): context.set_auto_parallel_context(device_num=4, global_rank=0) strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -210,4 +211,4 @@ def test_lars(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) diff --git a/tests/ut/python/parallel/test_matmul_dropout.py b/tests/ut/python/parallel/test_matmul_dropout.py index 436690d179..8d6276db17 100644 --- a/tests/ut/python/parallel/test_matmul_dropout.py +++ b/tests/ut/python/parallel/test_matmul_dropout.py @@ -62,7 +62,7 @@ def test_two_matmul_dropout(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 2), (2, 1)) - strategy2 = ((8, 1), ) + strategy2 = ((8, 1),) strategy3 = ((1, 8), (8, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_matmul_tensor.py b/tests/ut/python/parallel/test_matmul_tensor.py index 39bb2c6a37..a8211f2d19 100644 --- a/tests/ut/python/parallel/test_matmul_tensor.py +++ b/tests/ut/python/parallel/test_matmul_tensor.py @@ 
-62,7 +62,7 @@ def test_two_matmul(): self.fill = P.Fill() def construct(self, x, y): - fill = self.diag(self.fill(mstype.float32, (128, ), 1.0)) + fill = self.diag(self.fill(mstype.float32, (128,), 1.0)) out1 = self.matmul1(fill, x) out2 = self.matmul2(y, fill) out = self.matmul3(out1, out2) @@ -77,7 +77,7 @@ def test_two_matmul(): x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 128]), dtype=ms.float32) - + compile(net, x, y) @@ -116,7 +116,7 @@ def test_two_matmul1(): self.fill = P.Fill() def construct(self, x, y): - fill = self.diag(self.fill(mstype.float32, (128, ), 1.0)) + fill = self.diag(self.fill(mstype.float32, (128,), 1.0)) out1 = self.matmul1(fill, x) out2 = self.matmul2(fill, y) out = self.matmul3(out1, out2) @@ -131,7 +131,7 @@ def test_two_matmul1(): x = Tensor(np.ones([128, 128]), dtype=ms.float32) y = Tensor(np.ones([128, 128]), dtype=ms.float32) - + compile(net, x, y) @@ -156,5 +156,5 @@ def test_matmul_add_tensor(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) - + compile(net, x, y) diff --git a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py index f67b854e79..6349e48688 100644 --- a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py +++ b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py @@ -82,5 +82,5 @@ def test_two_matmul(): y = Tensor(np.ones([64, 32]), dtype=ms.float32) b = Tensor(np.ones([32, 64]), dtype=ms.float32) z = Tensor(np.ones([64, 64]), dtype=ms.float32) - + _executor.compile(net, x, y, b, z) diff --git a/tests/ut/python/parallel/test_neg.py b/tests/ut/python/parallel/test_neg.py index 4d9e16fd6f..09d5f7171e 100644 --- a/tests/ut/python/parallel/test_neg.py +++ b/tests/ut/python/parallel/test_neg.py @@ -42,14 +42,14 @@ def compile(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() - _executor.compile(train_net, _x, _b) + _executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() def test_neg_data_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((16, 1, 1), (16, 1, 1)) - strategy2 = ((16, 1, 1), ) + strategy2 = ((16, 1, 1),) net = Net(_w1, strategy1, strategy2) compile(net) @@ -57,7 +57,7 @@ def test_neg_data_parallel(): def test_neg_model_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((1, 1, 16), (1, 1, 16)) - strategy2 = ((1, 1, 16), ) + strategy2 = ((1, 1, 16),) net = Net(_w1, strategy1, strategy2) compile(net) @@ -65,7 +65,7 @@ def test_neg_model_parallel(): def test_neg_hybrid_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((2, 2, 4), (2, 2, 4)) - strategy2 = ((2, 2, 4), ) + strategy2 = ((2, 2, 4),) net = Net(_w1, strategy1, strategy2) compile(net) @@ -79,7 +79,6 @@ def test_neg_auto_parallel(): def test_neg_repeat_calc(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((2, 2, 4), (2, 2, 4)) - strategy2 = ((1, 2, 2), ) + strategy2 = ((1, 2, 2),) net = Net(_w1, strategy1, strategy2) compile(net) - diff --git a/tests/ut/python/parallel/test_one_dev.py b/tests/ut/python/parallel/test_one_dev.py index efd4889ce6..e5f8eefdae 100644 --- 
a/tests/ut/python/parallel/test_one_dev.py +++ b/tests/ut/python/parallel/test_one_dev.py @@ -84,7 +84,7 @@ def all_to_all_common(): opt = Momentum(net.trainable_params(), learning_rate, momentum) model = Model(net, loss, opt) - model.train(epoch_size, dataset,dataset_sink_mode=False) + model.train(epoch_size, dataset, dataset_sink_mode=False) strategys = _executor._get_strategy(model._train_network) return strategys @@ -99,4 +99,3 @@ def test_one_dev(): assert v == [[1, 1]] elif re.search('MatMul-op', k) is not None: assert v == [[1, 1], [1, 1]] - diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py index fd06535b66..bed8c57c16 100644 --- a/tests/ut/python/parallel/test_one_hot_net.py +++ b/tests/ut/python/parallel/test_one_hot_net.py @@ -28,19 +28,18 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore import context - -device_num=16 +device_num = 16 device_id = 2 class StrategyModel(): - onehot_strategy = ((1, device_num),(),()) - twod_strategy = ((1, device_num), ) - twod_strategy_m = ((device_num, 1), ) + onehot_strategy = ((1, device_num), (), ()) + twod_strategy = ((1, device_num),) + twod_strategy_m = ((device_num, 1),) scalar_twod_strategy = ((), (1, device_num)) twod_scalar_strategy = ((1, device_num), ()) - scalar_strategy = ((), ) - oned_strategy = ((1, ), ) + scalar_strategy = ((),) + oned_strategy = ((1,),) scalar_scalar_strategy = ((), ()) twod_twod_strategy = ((1, device_num), (1, device_num)) twod_twodbc_strategy = ((1, device_num), (1, 1)) @@ -48,13 +47,13 @@ class StrategyModel(): class StrategyBatch(): - onehot_strategy = ((device_num, 1),(),()) - twod_strategy = ((1, device_num), ) - twod_strategy_m = ((device_num, 1), ) + onehot_strategy = ((device_num, 1), (), ()) + twod_strategy = ((1, device_num),) + twod_strategy_m = ((device_num, 1),) scalar_twod_strategy = ((), (1, device_num)) twod_scalar_strategy = ((1, device_num), ()) - scalar_strategy = ((), ) - oned_strategy = ((1, ), ) + scalar_strategy = ((),) + oned_strategy = ((1,),) scalar_scalar_strategy = ((), ()) twod_twod_strategy = ((1, device_num), (1, device_num)) twod_twodbc_strategy = ((1, device_num), (1, 1)) @@ -164,7 +163,7 @@ class SemiAutoOneHotNet(Cell): w = self.normalize2(self.weight) fc_o = self.fc(input_n, w) fc_o_shape = F.shape(fc_o) - one_hot_float = self.onehot(label, fc_o_shape[1],self.on_value, self.off_value) + one_hot_float = self.onehot(label, fc_o_shape[1], self.on_value, self.off_value) local_label = self.cast(one_hot_float, mstype.int32) exp_o = self.exp(fc_o) @@ -173,7 +172,8 @@ class SemiAutoOneHotNet(Cell): exp2_o = self.exp2(mul_const2_o) mul_const3_o = self.mul_const3(exp2_o, self.c_const) mul_const4_o = self.mul_const4(F.scalar_to_array(1), local_label) - mul6_o = self.mul6(self.mul(mul_const3_o, one_hot_float), self.mul2(fc_o, self.cast2(mul_const4_o, mstype.float32))) + mul6_o = self.mul6(self.mul(mul_const3_o, one_hot_float), + self.mul2(fc_o, self.cast2(mul_const4_o, mstype.float32))) mul_const5_o = self.mul_const5(mul6_o, self.d_const) max_o = self.reduce_max(mul_const5_o, -1) @@ -186,7 +186,8 @@ class SemiAutoOneHotNet(Cell): mul3_o = self.mul3(log_o, one_hot_float) mul7_o = self.mul7(mul3_o, self.cast3(F.scalar_to_array(-1), mstype.float32)) sum2_o = self.reduce_sum_2(mul7_o, -1) - loss = self.mul8(self.reduce_sum_3(sum2_o, -1), self.cast4(F.scalar_to_array(F.shape(mul_const5_o)[0]), mstype.float32)) + loss = self.mul8(self.reduce_sum_3(sum2_o, -1), + 
self.cast4(F.scalar_to_array(F.shape(mul_const5_o)[0]), mstype.float32)) return loss @@ -255,7 +256,7 @@ class BNReshapeDenseBNNet(nn.Cell): def construct(self, x, label): x = self.batch_norm(x) - x = self.reshape(x, (16, 2*32*32)) + x = self.reshape(x, (16, 2 * 32 * 32)) x = self.fc(x) x = self.batch_norm2(x) loss = self.loss(x, label) @@ -272,7 +273,7 @@ def test_bn_reshape_dense_bn_train_loss(): net = GradWrap(NetWithLoss(BNReshapeDenseBNNet())) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() - + _executor.compile(net, input, label) @@ -286,7 +287,7 @@ def test_semi_one_hot_net_batch(): net = GradWrap(NetWithLoss(net)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() - + _executor.compile(net, input, label) @@ -307,5 +308,3 @@ def test_semi_one_hot_net_model(): context.set_context(mode=context.GRAPH_MODE) model = Model(net, optimizer=opt) model.train(epoch_size, dataset, dataset_sink_mode=False) - - diff --git a/tests/ut/python/parallel/test_one_weight_parameter.py b/tests/ut/python/parallel/test_one_weight_parameter.py index 5c7f324c47..a0ad940270 100644 --- a/tests/ut/python/parallel/test_one_weight_parameter.py +++ b/tests/ut/python/parallel/test_one_weight_parameter.py @@ -22,6 +22,7 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.ops import functional as F + class NetWithLoss(nn.Cell): def __init__(self, network, strategy3): super(NetWithLoss, self).__init__() @@ -32,17 +33,19 @@ class NetWithLoss(nn.Cell): predict = self.network(x) return self.loss(predict, b)[0] + class OneStepCell(nn.Cell): def __init__(self, network): super(OneStepCell, self).__init__(auto_prefix=False) self.network = network self.weights = ParameterTuple(network.network.trainable_params()) - def construct(self, data, label): + def construct(self, data, label): weights = self.weights grads = C.grad_by_list(self.network, weights)(data, label) return grads + def test_one_weight_parameter(): class Net(nn.Cell): def __init__(self, strategy1, weight): diff --git a/tests/ut/python/parallel/test_onehot.py b/tests/ut/python/parallel/test_onehot.py index 6ed93f5241..e4e6ebb9c5 100644 --- a/tests/ut/python/parallel/test_onehot.py +++ b/tests/ut/python/parallel/test_onehot.py @@ -21,6 +21,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.ops.operations.comm_ops import _VirtualDataset + context.set_context(mode=context.GRAPH_MODE) @@ -79,7 +80,7 @@ def compile_graph(strategy1, strategy2, strategy3, strategy4, auto=False, onthot def test_onehot_model_parallel(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((1, 16), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4) @@ -88,7 +89,7 @@ def test_onehot_model_parallel(): def test_onehot_batch_parallel(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((16, 1), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4) @@ -97,8 +98,8 @@ def test_onehot_batch_parallel(): def test_onehot_batch_parallel_invalid_strategy(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) - strategy3 = ((16, ), (), ()) 
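A hedged reading of the StrategyModel and StrategyBatch constants reformatted above: each strategy holds one inner tuple per partitionable operator input, and a scalar input that cannot be sliced keeps an empty layout, as in onehot_strategy = ((1, device_num), (), ()), where the two empty tuples stand for the scalar on/off fill values. A plain-Python sketch of that shape invariant:

    device_num = 16
    # One layout per input: the indices tensor plus two scalar fill values.
    onehot_strategy = ((1, device_num), (), ())
    assert len(onehot_strategy) == 3
    assert onehot_strategy[1] == () == onehot_strategy[2]  # scalars: empty layout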
+ strategy2 = ((2, 8),) + strategy3 = ((16,), (), ()) strategy4 = ((16, 1), (16, 1)) try: compile_graph(strategy1, strategy2, strategy3, strategy4) @@ -109,7 +110,7 @@ def test_onehot_batch_parallel_invalid_strategy(): def test_onehot_repeated_calculation(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((4, 1), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4) @@ -127,7 +128,7 @@ def test_onehot_auto(): def test_onehot_model_parallel(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((1, 16), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4) @@ -136,7 +137,7 @@ def test_onehot_model_parallel(): def test_onehot_batch_parallel_axis0(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((16, 1), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4, onthot_axis=0) @@ -146,7 +147,7 @@ def test_onehot_batch_parallel_axis0(): def test_onehot_batch_parallel_invalid_strategy_axis0(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = None strategy4 = ((16, 1), (16, 1)) try: @@ -158,7 +159,7 @@ def test_onehot_batch_parallel_invalid_strategy_axis0(): def test_onehot_repeated_calculation_axis0(): context.set_auto_parallel_context(device_num=16, global_rank=0) strategy1 = ((2, 4), (4, 2)) - strategy2 = ((2, 8), ) + strategy2 = ((2, 8),) strategy3 = ((4, 1), (), ()) strategy4 = ((16, 1), (16, 1)) compile_graph(strategy1, strategy2, strategy3, strategy4, onthot_axis=0) diff --git a/tests/ut/python/parallel/test_operator_model_parallel.py b/tests/ut/python/parallel/test_operator_model_parallel.py index 389aeb1f08..8b63673ea4 100644 --- a/tests/ut/python/parallel/test_operator_model_parallel.py +++ b/tests/ut/python/parallel/test_operator_model_parallel.py @@ -33,15 +33,15 @@ from mindspore.common.parameter import Parameter from mindspore import context from tests.dataset_mock import MindData - dev_num = 8 -strategy_no_weight = ((dev_num, 1, 1, 1), ) +strategy_no_weight = ((dev_num, 1, 1, 1),) strategy_weight = ((dev_num, 1, 1, 1), (1, 1, 1, 1)) strategy_add = ((dev_num, 1, 1, 1), (dev_num, 1, 1, 1)) -strategy_bn = ((dev_num, 1, 1, 1), (1, ), (1, )) +strategy_bn = ((dev_num, 1, 1, 1), (1,), (1,)) strategy_fc_weight_nobias = ((1, dev_num), (1, dev_num)) -strategy_tensor_add = ((1, dev_num), (dev_num, )) +strategy_tensor_add = ((1, dev_num), (dev_num,)) + class DenseWrap(Cell): def __init__(self, @@ -321,7 +321,7 @@ class ResNet(Cell): class ResNetModelParallel(Cell): def __init__(self, block, layer_num, num_classes=100): super(ResNetModelParallel, self).__init__() - self.relu = P.ReLU().set_strategy(((1, dev_num, 1, 1), )) + self.relu = P.ReLU().set_strategy(((1, dev_num, 1, 1),)) self.maxpool = MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.layer1 = MakeLayer0( block, layer_num[0], in_channels=64, out_channels=256, stride=1) diff --git a/tests/ut/python/parallel/test_optimizer.py b/tests/ut/python/parallel/test_optimizer.py index 395adda179..4ef4f891a4 100644 --- a/tests/ut/python/parallel/test_optimizer.py +++ 
b/tests/ut/python/parallel/test_optimizer.py @@ -60,4 +60,3 @@ def test_dense_gen_graph(): label = Tensor(np.zeros([64, 32]).astype(np.float32)) network.set_auto_parallel() _executor.compile(network, predict, label) - diff --git a/tests/ut/python/parallel/test_optimizer_clone_weight.py b/tests/ut/python/parallel/test_optimizer_clone_weight.py index 969c2f0cff..defe3adb68 100644 --- a/tests/ut/python/parallel/test_optimizer_clone_weight.py +++ b/tests/ut/python/parallel/test_optimizer_clone_weight.py @@ -53,9 +53,9 @@ def test_optimizer_clone_weight(): return out context.set_auto_parallel_context(device_num=4, global_rank=0) - + strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -71,7 +71,7 @@ def test_optimizer_clone_weight(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) def test_optimizer_clone_weight2(): @@ -88,9 +88,9 @@ def test_optimizer_clone_weight2(): return out context.set_auto_parallel_context(device_num=4, global_rank=0) - + strategy1 = ((2, 1), (2, 1)) - strategy2 = ((4, 1), ) + strategy2 = ((4, 1),) strategy3 = ((4, 1), (4, 1)) x = Tensor(np.ones([64, 32]), dtype=ms.float32) @@ -106,4 +106,4 @@ def test_optimizer_clone_weight2(): train_net = TrainOneStepCell(net_with_loss, optimizer) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - compile(train_net, x, b) + compile(train_net, x, b) diff --git a/tests/ut/python/parallel/test_parameter_init.py b/tests/ut/python/parallel/test_parameter_init.py index 6bb1440014..250d5e0db9 100644 --- a/tests/ut/python/parallel/test_parameter_init.py +++ b/tests/ut/python/parallel/test_parameter_init.py @@ -51,7 +51,7 @@ def test_parameter_init(): weight = Tensor(np.ones([64, 32]), dtype=ms.float32) net = Net(strategy1, weight) - net(x,) + net(x, ) if __name__ == '__main__': diff --git a/tests/ut/python/parallel/test_prelu.py b/tests/ut/python/parallel/test_prelu.py index 67bdb3de04..19acc07f99 100644 --- a/tests/ut/python/parallel/test_prelu.py +++ b/tests/ut/python/parallel/test_prelu.py @@ -87,15 +87,17 @@ def test_prelu_parallel_success1(): def __init__(self, strategy): super().__init__() self.prelu = P.PReLU().set_strategy(strategy) + def construct(self, x, y): out = self.prelu(x, y) return out + context.reset_auto_parallel_context() context.set_auto_parallel_context(device_num=8, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - strategy = ((1, 1, 1, 1), (1, )) - x = Tensor(np.random.rand(4, 4, 32, 64),dtype=ms.float32) - w = Tensor(np.random.rand(4),dtype=ms.float32) + strategy = ((1, 1, 1, 1), (1,)) + x = Tensor(np.random.rand(4, 4, 32, 64), dtype=ms.float32) + w = Tensor(np.random.rand(4), dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy))) compile(net, x, w) @@ -105,15 +107,17 @@ def test_prelu_parallel_success2(): def __init__(self, strategy): super().__init__() self.prelu = P.PReLU().set_strategy(strategy) + def construct(self, x, y): out = self.prelu(x, y) return out + context.reset_auto_parallel_context() context.set_auto_parallel_context(device_num=64, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - strategy = ((2, 1, 4, 8), (1, )) - x = Tensor(np.random.rand(4, 4, 32, 64),dtype=ms.float32) - w = Tensor(np.random.rand(4),dtype=ms.float32) + strategy = ((2, 1, 4, 8), (1,)) + x = 
Tensor(np.random.rand(4, 4, 32, 64), dtype=ms.float32) + w = Tensor(np.random.rand(4), dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy))) compile(net, x, w) @@ -129,7 +133,6 @@ def test_prelu_parallel_success3(): predict = self.network(x, y, w) return self.loss(predict) - class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -143,6 +146,7 @@ def test_prelu_parallel_success3(): super().__init__() self.matmul = P.MatMul().set_strategy(strategy1) self.prelu = P.PReLU().set_strategy(strategy2) + def construct(self, x, y, w): out = self.matmul(x, y) out = self.prelu(out, w) @@ -152,10 +156,10 @@ def test_prelu_parallel_success3(): context.set_auto_parallel_context(device_num=64, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") strategy1 = ((2, 4), (4, 2)) - strategy2 = ((32, 1), (1, )) - x = Tensor(np.random.rand(128, 64),dtype=ms.float32) - y = Tensor(np.random.rand(64, 16),dtype=ms.float32) - w = Tensor(np.random.rand(16),dtype=ms.float32) + strategy2 = ((32, 1), (1,)) + x = Tensor(np.random.rand(128, 64), dtype=ms.float32) + y = Tensor(np.random.rand(64, 16), dtype=ms.float32) + w = Tensor(np.random.rand(16), dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) net.set_auto_parallel() _executor.compile(net, x, y, w) @@ -166,15 +170,17 @@ def test_prelu_parallel_success4(): def __init__(self, strategy): super().__init__() self.prelu = P.PReLU().set_strategy(strategy) + def construct(self, x, y): out = self.prelu(x, y) return out + context.reset_auto_parallel_context() context.set_auto_parallel_context(device_num=64, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - strategy = ((2, 4, 4, 2), (4, )) - x = Tensor(np.random.rand(4, 16, 32, 64),dtype=ms.float32) - w = Tensor(np.random.rand(16),dtype=ms.float32) + strategy = ((2, 4, 4, 2), (4,)) + x = Tensor(np.random.rand(4, 16, 32, 64), dtype=ms.float32) + w = Tensor(np.random.rand(16), dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy))) compile(net, x, w) @@ -184,14 +190,16 @@ def test_prelu_parallel_success5(): def __init__(self, strategy): super().__init__() self.prelu = P.PReLU().set_strategy(strategy) + def construct(self, x, y): out = self.prelu(x, y) return out + context.reset_auto_parallel_context() context.set_auto_parallel_context(device_num=64, global_rank=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - strategy = ((2, 4, 4, 2), (1, )) - x = Tensor(np.random.rand(4, 16, 32, 64),dtype=ms.float32) - w = Tensor(np.random.rand(1),dtype=ms.float32) + strategy = ((2, 4, 4, 2), (1,)) + x = Tensor(np.random.rand(4, 16, 32, 64), dtype=ms.float32) + w = Tensor(np.random.rand(1), dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy))) compile(net, x, w) diff --git a/tests/ut/python/parallel/test_prelu_cell.py b/tests/ut/python/parallel/test_prelu_cell.py index a2ca303244..4ab7ec57fa 100644 --- a/tests/ut/python/parallel/test_prelu_cell.py +++ b/tests/ut/python/parallel/test_prelu_cell.py @@ -25,6 +25,7 @@ from tests.dataset_mock import MindData from mindspore import context from mindspore.ops import functional as F from mindspore.common.initializer import initializer + context.set_context(mode=context.GRAPH_MODE) @@ -66,11 +67,11 @@ class PReLU(nn.Cell): if not isinstance(w, Tensor): raise TypeError("w only support np.float32, float or Tensor type.") - self.w = Parameter(initializer(w, [channel,]), name='a') + self.w = Parameter(initializer(w, [channel, ]), name='a') 
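The net(x, ) call kept in test_parameter_init above is harmless: a trailing comma in a call expression is a no-op, so all three spellings below make the same single-argument call. A plain sketch:

    def net(x):
        return x

    # The trailing comma (with or without a space before ')') changes nothing
    # at runtime; the cleanup only normalizes which spelling the tests use.
    assert net(3,) == net(3, ) == net(3)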
self.prelu = P.PReLU() - self.relu = P.ReLU().set_strategy(((1, ), )) - self.sub = P.Sub().set_strategy(((1, ), (1, ))) - self.assign_sub = P.AssignSub().set_strategy(((1, ), (1, ))) + self.relu = P.ReLU().set_strategy(((1,),)) + self.sub = P.Sub().set_strategy(((1,), (1,))) + self.assign_sub = P.AssignSub().set_strategy(((1,), (1,))) def construct(self, x): u = self.relu(self.w) diff --git a/tests/ut/python/parallel/test_reduce_method_info.py b/tests/ut/python/parallel/test_reduce_method_info.py index 2686e593ac..87c994c195 100644 --- a/tests/ut/python/parallel/test_reduce_method_info.py +++ b/tests/ut/python/parallel/test_reduce_method_info.py @@ -65,7 +65,7 @@ def test_sum_mul(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 1, 8), (1, 1, 8)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -92,7 +92,7 @@ def test_sum_mul2(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 1, 4, 2), (1, 1, 4, 2)) - strategy2 = ((2, 4, 1, 1), ) + strategy2 = ((2, 4, 1, 1),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -119,7 +119,7 @@ def test_sum_mul3(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 2, 1), ) + strategy2 = ((4, 2, 1),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -146,7 +146,7 @@ def test_sum_mul4(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((2, 2, 2), ) + strategy2 = ((2, 2, 2),) strategy3 = ((4, 2, 1), (4, 2, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -171,7 +171,7 @@ def test_sum_mul5(): context.set_auto_parallel_context(device_num=64, global_rank=0) strategy1 = ((1, 8, 8), (1, 8, 8)) - strategy2 = ((2, 4, 1), ) + strategy2 = ((2, 4, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -195,7 +195,7 @@ def test_sum_mul6(): context.set_auto_parallel_context(device_num=64, global_rank=0) strategy1 = ((1, 8, 8), (1, 8, 8)) - strategy2 = ((2, 1, 4), ) + strategy2 = ((2, 1, 4),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -219,7 +219,7 @@ def test_sum_mul7(): context.set_auto_parallel_context(device_num=64, global_rank=0) strategy1 = ((1, 8, 8), (1, 8, 8)) - strategy2 = ((2, 4, 1), ) + strategy2 = ((2, 4, 1),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -245,7 +245,7 @@ def test_max_mul(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -272,7 +272,7 @@ def test_min_mul(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 
2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -299,7 +299,7 @@ def test_reduce_mean_mul_float32(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -307,7 +307,7 @@ def test_reduce_mean_mul_float32(): x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) b = Tensor(np.ones([32, 64]), dtype=ms.float32) - + compile(net, x, y, b) @@ -349,7 +349,7 @@ def gen_inputs_and_compile(net): def tobefixed_test_arg_max_with_value_mul_semi_axis_parallel(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(ArgMaxWithValueNet(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -359,7 +359,7 @@ def tobefixed_test_arg_max_with_value_mul_semi_axis_parallel(): def test_arg_max_with_value_mul_semi(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 1), ) + strategy2 = ((4, 1, 1),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(ArgMaxWithValueNet(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -379,18 +379,17 @@ def test_arg_max_with_value_mul_auto(): def test_arg_min_with_value_mul_semi_axis_parallel(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(ArgMinWithValueNet(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") gen_inputs_and_compile(net) - def test_arg_min_with_value_mul_semi(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 1), ) + strategy2 = ((4, 1, 1),) strategy3 = ((2, 4), (2, 4)) net = GradWrap(NetWithLoss(ArgMinWithValueNet(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -424,8 +423,8 @@ class ArgMinWithValueNet2(nn.Cell): def tobefixed_test_arg_min_with_value_mul_semi_axis_parallel2(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) - strategy3 = ((2, 4, 1), ) + strategy2 = ((4, 1, 2),) + strategy3 = ((2, 4, 1),) net = GradWrap(NetWithLoss(ArgMinWithValueNet2(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") gen_inputs_and_compile(net) @@ -434,8 +433,8 @@ def tobefixed_test_arg_min_with_value_mul_semi_axis_parallel2(): def test_arg_min_with_value_mul_semi2(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 1), ) - strategy3 = ((2, 4, 1), ) + strategy2 = ((4, 1, 1),) + strategy3 = ((2, 4, 1),) net = GradWrap(NetWithLoss(ArgMinWithValueNet2(strategy1, strategy2, strategy3))) 
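The tobefixed_ prefix on tobefixed_test_arg_max_with_value_mul_semi_axis_parallel and its arg-min sibling keeps those functions out of pytest's default collection, which only gathers names matching test_*. A hedged sketch of the more explicit skip idiom, assuming pytest is available; the reason string is made up:

    import pytest

    @pytest.mark.skip(reason="axis-parallel ArgMaxWithValue strategy to be fixed")
    def test_arg_max_with_value_mul_semi_axis_parallel():
        ...  # body elided; the marker keeps the test listed but skipped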
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") gen_inputs_and_compile(net) @@ -467,11 +466,10 @@ def test_cross_batch(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 2), (4, 2)) - strategy2 = ((2, 1), ) - strategy3 = ((8, ), ) + strategy2 = ((2, 1),) + strategy3 = ((8,),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([32, 64]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -495,11 +493,10 @@ def test_cross_batch2(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 2), (4, 2)) - strategy2 = ((2, 1), ) - strategy3 = ((8, ), ) + strategy2 = ((2, 1),) + strategy3 = ((8,),) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([32, 64]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -547,7 +544,7 @@ def test_max_empty_tuple(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((1, 4, 2), (1, 4, 2)) - strategy2 = ((4, 1, 2), ) + strategy2 = ((4, 1, 2),) strategy3 = ((), (1, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -555,5 +552,5 @@ def test_max_empty_tuple(): x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) b = Tensor(np.ones([128, 32]), dtype=ms.float32) - + compile(net, x, y, b) diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py index b3386a49ce..903b652112 100644 --- a/tests/ut/python/parallel/test_reshape.py +++ b/tests/ut/python/parallel/test_reshape.py @@ -31,9 +31,11 @@ from mindspore.ops import functional as F from mindspore.common.parameter import ParameterTuple from mindspore.common import dtype as mstype from mindspore.parallel import set_algo_parameters + context.set_context(mode=context.GRAPH_MODE) context.reset_auto_parallel_context() + class Dataset(MindData): def __init__(self, predict, label, length=3, input_num=2): super(Dataset, self).__init__(size=length) @@ -93,14 +95,14 @@ def reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) loss.softmax_cross_entropy.set_strategy(strategy_loss) - loss.one_hot.set_strategy(((8,1), (), ())) + loss.one_hot.set_strategy(((8, 1), (), ())) opt = Momentum(net.trainable_params(), learning_rate, momentum) model = Model(net, loss, opt) model.train(epoch_size, dataset, dataset_sink_mode=False) def test_reshape1(): - strategy0 = ((8, 1, 1, 1), ) + strategy0 = ((8, 1, 1, 1),) strategy1 = None strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) @@ -108,8 +110,8 @@ def test_reshape1(): def test_reshape1_strategy_1(): - strategy0 = ((8, 1, 1, 1), ) - strategy1 = ((8, 1, 1, 1), ) + strategy0 = ((8, 1, 1, 1),) + strategy1 = ((8, 1, 1, 1),) strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) try: @@ -119,8 +121,8 @@ def test_reshape1_strategy_1(): def test_reshape1_strategy_2(): - strategy0 = ((8, 1, 1, 1), ) - strategy1 = ((8, 1, 1, 1), ) + strategy0 = ((8, 1, 1, 1),) + strategy1 = ((8, 1, 1, 1),) strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) try: @@ -130,7 +132,7 @@ def test_reshape1_strategy_2(): def test_reshape2(): - strategy0 = ((8, 1, 1, 1), ) + strategy0 = 
((8, 1, 1, 1),) strategy1 = None strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) @@ -138,7 +140,7 @@ def test_reshape2(): def test_reshape3(): - strategy0 = ((2, 1, 1, 1), ) + strategy0 = ((2, 1, 1, 1),) strategy1 = None strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) @@ -146,7 +148,7 @@ def test_reshape3(): def test_reshape4(): - strategy0 = ((1, 1, 1, 1), ) + strategy0 = ((1, 1, 1, 1),) strategy1 = None strategy2 = ((8, 1), (1, 1)) strategy_loss = ((8, 1), (8, 1)) @@ -154,7 +156,7 @@ def test_reshape4(): def test_reshape5(): - strategy0 = ((2, 1, 1, 1), ) + strategy0 = ((2, 1, 1, 1),) strategy1 = None strategy2 = ((1, 8), (8, 1)) strategy_loss = ((8, 1), (8, 1)) @@ -316,7 +318,7 @@ def reshape_net2(backbone): net = GradWrap(NetWithLoss(backbone)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - + compile(net, input) @@ -393,6 +395,7 @@ class TrainOneStepCell(nn.Cell): >>> loss_net = WithLossCell(net, loss_fn) >>> train_net = TrainOneStepCell(loss_net, optim) """ + def __init__(self, network, optimizer, sens=1.0): super(TrainOneStepCell, self).__init__(auto_prefix=False) self.network = network @@ -479,7 +482,7 @@ def test_batchnorm_reshape_train(): input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01) net = GradWrap(NetWithLoss(BatchNormReshapeNet())) - + compile(net, input) @@ -503,7 +506,7 @@ class BNReshapeDenseBNNet(nn.Cell): def construct(self, x): x = self.batch_norm(x) - x = self.reshape(x, (16, 2*32*32)) + x = self.reshape(x, (16, 2 * 32 * 32)) x = self.fc(x) x = self.batch_norm2(x) return x @@ -517,7 +520,7 @@ def test_bn_reshape_dense_bn_train(): net = GradWrap(NetWithLoss(BNReshapeDenseBNNet())) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - + compile(net, input) @@ -526,8 +529,8 @@ class ParallelReduceMeanNet(nn.Cell): reducemean_keep_dims=False, reducemean_axis=-1, strategy=None): super().__init__() self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel, - kernel_size=1, stride=1, pad_mode='valid', has_bias=True, - weight_init='ones', bias_init='ones') + kernel_size=1, stride=1, pad_mode='valid', has_bias=True, + weight_init='ones', bias_init='ones') self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims) self.flat = nn.Flatten() self.reducemean_axis = reducemean_axis @@ -563,14 +566,15 @@ def test_flatten_reshape(parallel_mode="auto_parallel"): epoch_size = 2 context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) - net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),)) + net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), + strategy=((4, 2, 1, 1),)) loss = CrossEntropyLoss() predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32) label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32) dataset = Dataset(predict, label, 2, input_num=2) opt = Momentum(net.trainable_params(), learning_rate, momentum) - model = Model(net, loss_fn = loss, optimizer=opt) + model = Model(net, loss_fn=loss, optimizer=opt) model.train(epoch_size, dataset, dataset_sink_mode=False) @@ -582,14 +586,15 @@ def test_flatten_reshape2(parallel_mode="auto_parallel"): context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) set_algo_parameters(fully_use_devices=False) - net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, 
reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),)) + net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), + strategy=((4, 1, 1, 1),)) loss = CrossEntropyLoss() predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32) label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32) dataset = Dataset(predict, label, 2, input_num=2) opt = Momentum(net.trainable_params(), learning_rate, momentum) - model = Model(net, loss_fn = loss, optimizer=opt) + model = Model(net, loss_fn=loss, optimizer=opt) model.train(epoch_size, dataset, dataset_sink_mode=False) @@ -630,7 +635,7 @@ def test_flatten_reshape3(parallel_mode="auto_parallel"): dataset = Dataset(predict, label, 2, input_num=2) opt = Momentum(net.trainable_params(), learning_rate, momentum) - model = Model(net, loss_fn = loss, optimizer=opt) + model = Model(net, loss_fn=loss, optimizer=opt) model.train(epoch_size, dataset, dataset_sink_mode=False) @@ -652,7 +657,8 @@ def test_flatten_reshape4(parallel_mode="semi_auto_parallel"): context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) set_algo_parameters(fully_use_devices=False) - net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),)) + net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, + strategy=((4, 1, 1, 1),)) loss = CrossEntropyLoss2() predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32) label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32) diff --git a/tests/ut/python/parallel/test_scalar_loss.py b/tests/ut/python/parallel/test_scalar_loss.py index cf9b9a9599..5aeac960b2 100644 --- a/tests/ut/python/parallel/test_scalar_loss.py +++ b/tests/ut/python/parallel/test_scalar_loss.py @@ -23,6 +23,7 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.ops import functional as F + class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -31,6 +32,7 @@ class GradWrap(nn.Cell): def construct(self, x, y, bias): return C.grad_all(self.network)(x, y, bias) + def test_sum_as_loss(): class Net(nn.Cell): def __init__(self, strategy0, strategy1): @@ -41,14 +43,14 @@ def test_sum_as_loss(): def construct(self, x, y, bias): out = self.fc_nobias(x, y) - out = self.reduce_sum(out, (0,1)) + out = self.reduce_sum(out, (0, 1)) out = self.mul(out, F.scalar_to_array(2.0)) return out context.set_auto_parallel_context(device_num=16, global_rank=0) - + strategy0 = ((4, 1), (4, 1)) - strategy1 = ((4, 1), ) + strategy1 = ((4, 1),) net = GradWrap(Net(strategy0, strategy1)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py index 3c287a25d3..b58fe78604 100644 --- a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py +++ b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py @@ -74,7 +74,7 @@ class TrainStepWrap(nn.Cell): for params in self.trainable_params: weights_w.append(params) weights_d.append(params) - + self.weights_w = ParameterTuple(weights_w) self.weights_d = ParameterTuple(weights_d) self.optimizer_w = FTRL(learning_rate=1e-2, params=self.weights_w, diff --git a/tests/ut/python/parallel/test_set_auto_parallel_context.py b/tests/ut/python/parallel/test_set_auto_parallel_context.py index 301bb608e0..1ad717b880 100644 
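The Model(net, loss_fn=loss, optimizer=opt) hunks above drop the spaces around the keyword-argument equals sign (pycodestyle E251); only the spelling changes, not the call. A plain sketch with stand-in names:

    # E251: no spaces around '=' when passing a keyword argument.
    def model(net, loss_fn=None, optimizer=None):
        return net, loss_fn, optimizer

    result = model("net", loss_fn="ce", optimizer="momentum")
    assert result == ("net", "ce", "momentum")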
--- a/tests/ut/python/parallel/test_set_auto_parallel_context.py +++ b/tests/ut/python/parallel/test_set_auto_parallel_context.py @@ -17,6 +17,7 @@ from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore import context from mindspore.parallel import set_algo_parameters + def test_set_auto_parallel_context(): context.set_auto_parallel_context(device_num=4, global_rank=3, mirror_mean=True, cast_before_mirror=False, parallel_mode="auto_parallel", parameter_broadcast=False) diff --git a/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py index 0f890ced5e..f51958370f 100644 --- a/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py +++ b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py @@ -42,7 +42,7 @@ def compile(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() - _executor.compile(train_net, _x, _b) + _executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_softmax_cross_entropy_expand.py b/tests/ut/python/parallel/test_softmax_cross_entropy_expand.py index ef4c2b53c3..395018abd6 100644 --- a/tests/ut/python/parallel/test_softmax_cross_entropy_expand.py +++ b/tests/ut/python/parallel/test_softmax_cross_entropy_expand.py @@ -19,6 +19,7 @@ from mindspore.common.api import _executor from mindspore import context import numpy as np + def test_SoftmaxCrossEntropy(): net = SoftmaxCrossEntropyExpand(sparse=True) context.set_auto_parallel_context(parallel_mode="auto_parallel") diff --git a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py index a097197a8b..cf80a1fef1 100644 --- a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py +++ b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py @@ -22,6 +22,7 @@ import mindspore as ms from mindspore.common.api import _executor from mindspore.ops import composite as C + class NetWithLoss(nn.Cell): def __init__(self, network, strategy3=None): super(NetWithLoss, self).__init__() @@ -61,7 +62,7 @@ def test_softmax_cross_entropy_loss(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 1), (2, 1)) - strategy2 = ((4, 2), ) + strategy2 = ((4, 2),) strategy3 = ((8, 1), (8, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2), strategy3)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -86,7 +87,7 @@ def test_softmax_cross_entropy_loss_repeated_calculation(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 1), (2, 1)) - strategy2 = ((4, 2), ) + strategy2 = ((4, 2),) strategy3 = ((2, 1), (2, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2), strategy3)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_split_grad_sens.py b/tests/ut/python/parallel/test_split_grad_sens.py index fe7f4085ec..2bdd74bca3 100644 --- a/tests/ut/python/parallel/test_split_grad_sens.py +++ b/tests/ut/python/parallel/test_split_grad_sens.py @@ -71,7 +71,7 @@ def test_no_grad(): return out context.set_auto_parallel_context(device_num=8, global_rank=0) - + strategy1 = ((4, 2), (2, 1)) strategy2 = ((2, 4), (4, 1)) net = Net(strategy1, strategy2) @@ -96,7 +96,7 @@ def test_grad_sens_parameter_type(): return out 
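The reduce_sum axis rewrite, (0,1) to (0, 1), above in test_scalar_loss and again below in test_split_grad_sens, is pycodestyle E231 (missing whitespace after comma); the tuple itself is unchanged. A one-line sketch:

    axes_old = (0,1)   # flagged as E231, missing whitespace after ','
    axes_new = (0, 1)  # cleaned spelling; the very same tuple
    assert axes_old == axes_new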
context.set_auto_parallel_context(device_num=8, global_rank=0) - + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") strategy1 = ((4, 2), (2, 1)) strategy2 = ((2, 4), (4, 1)) @@ -125,7 +125,7 @@ def test_grad_sens_tensor_type(): return out context.set_auto_parallel_context(device_num=8, global_rank=0) - + strategy1 = ((4, 2), (2, 1)) strategy2 = ((2, 4), (4, 1)) net = GradWrap2(Net(strategy1, strategy2)) @@ -146,12 +146,12 @@ def test_grad_sens_scalar_broadcast(): def construct(self, x, y, bias): out = self.fc_nobias(x, y) - out = self.reduce_sum(out, (0,1)) + out = self.reduce_sum(out, (0, 1)) return out context.set_auto_parallel_context(device_num=16, global_rank=0) strategy0 = ((4, 1), (4, 1)) - strategy1 = ((4, 1), ) + strategy1 = ((4, 1),) net = GradWrap3(Net(strategy0, strategy1)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_square.py b/tests/ut/python/parallel/test_square.py index a2797f7333..49dd534356 100644 --- a/tests/ut/python/parallel/test_square.py +++ b/tests/ut/python/parallel/test_square.py @@ -44,14 +44,14 @@ def compile_net(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() - _executor.compile(train_net, _x, _b) + _executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() def test_square_data_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((16, 1, 1), (16, 1, 1)) - strategy2 = ((16, 1, 1), ) + strategy2 = ((16, 1, 1),) net = Net(_w1, strategy1, strategy2) compile_net(net) @@ -59,7 +59,7 @@ def test_square_data_parallel(): def test_square_model_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((1, 1, 16), (1, 1, 16)) - strategy2 = ((1, 1, 16), ) + strategy2 = ((1, 1, 16),) net = Net(_w1, strategy1, strategy2) compile_net(net) @@ -67,7 +67,7 @@ def test_square_model_parallel(): def test_square_hybrid_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((2, 2, 4), (2, 2, 4)) - strategy2 = ((2, 2, 4), ) + strategy2 = ((2, 2, 4),) net = Net(_w1, strategy1, strategy2) compile_net(net) @@ -81,6 +81,6 @@ def test_square_auto_parallel(): def test_square_repeat_calc(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) strategy1 = ((2, 2, 4), (2, 2, 4)) - strategy2 = ((1, 2, 2), ) + strategy2 = ((1, 2, 2),) net = Net(_w1, strategy1, strategy2) compile_net(net) diff --git a/tests/ut/python/parallel/test_squeeze_info.py b/tests/ut/python/parallel/test_squeeze_info.py index fb726055fe..8590f37619 100644 --- a/tests/ut/python/parallel/test_squeeze_info.py +++ b/tests/ut/python/parallel/test_squeeze_info.py @@ -38,13 +38,13 @@ _b = Tensor(np.ones([64, 32]), dtype=ms.float32) def compile(net): net.set_auto_parallel() - _executor.compile(net, _x, _b) + _executor.compile(net, _x, _b) context.reset_auto_parallel_context() def test_squeeze_data_parallel(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) - strategy1 = ((16, 1, 1, 1), ) + strategy1 = ((16, 1, 1, 1),) strategy2 = ((16, 1), (16, 1)) net = Net(strategy1, strategy2) compile(net) @@ -52,7 +52,7 @@ def test_squeeze_data_parallel(): def test_squeeze_model_parallel(): 
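test_square.py above already uses the safer compile_net name, while test_squeeze_info.py keeps a module-level compile helper that shadows the Python builtin of the same name (pylint W0622, not touched by this cleanup). A small sketch of the shadowing, with hypothetical names:

    def compile(net):               # shadows builtins.compile in this module
        return "compiled " + net

    print(compile("lenet"))         # calls the helper, not the builtin
    del compile                     # name lookup falls back to the builtin
    code = compile("1 + 1", "<expr>", "eval")  # builtin compile works again
    assert eval(code) == 2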
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) - strategy1 = ((1, 1, 16, 1), ) + strategy1 = ((1, 1, 16, 1),) strategy2 = ((1, 16), (1, 16)) net = Net(strategy1, strategy2) compile(net) @@ -60,7 +60,7 @@ def test_squeeze_model_parallel(): def test_squeeze_specified_axis(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) - strategy1 = ((4, 1, 4, 1), ) + strategy1 = ((4, 1, 4, 1),) strategy2 = ((8, 2), (8, 2)) net = Net(strategy1, strategy2, (1, 3)) compile(net) @@ -74,7 +74,7 @@ def test_squeeze_auto_parallel(): def test_squeeze_repeat_calc(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) - strategy1 = ((1, 1, 8, 1), ) + strategy1 = ((1, 1, 8, 1),) strategy2 = ((2, 8), (2, 8)) net = Net(strategy1, strategy2) compile(net) diff --git a/tests/ut/python/parallel/test_strategy_checkpoint.py b/tests/ut/python/parallel/test_strategy_checkpoint.py index 8edf6dbc72..1aabb7736c 100644 --- a/tests/ut/python/parallel/test_strategy_checkpoint.py +++ b/tests/ut/python/parallel/test_strategy_checkpoint.py @@ -36,7 +36,6 @@ def test_six_matmul_save(): predict = self.network(x1, x6) return self.loss(predict) - class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -86,6 +85,7 @@ def test_six_matmul_save(): x6 = Tensor(np.ones([128, 32]), dtype=ms.float32) _executor.compile(net, x1, x6) + # remove matmul2, add matmul7 def test_six_matmul_load(): class NetWithLoss(nn.Cell): @@ -98,7 +98,6 @@ def test_six_matmul_load(): predict = self.network(x1, x6, x7) return self.loss(predict) - class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -148,6 +147,7 @@ def test_six_matmul_load(): x7 = Tensor(np.ones([32, 32]), dtype=ms.float32) _executor.compile(net, x1, x6, x7) + # model_parallel test def test_six_matmul_save_auto(): class NetWithLoss(nn.Cell): @@ -160,7 +160,6 @@ def test_six_matmul_save_auto(): predict = self.network(x1, x6) return self.loss(predict) - class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -204,6 +203,7 @@ def test_six_matmul_save_auto(): x6 = Tensor(np.ones([128, 32]), dtype=ms.float32) _executor.compile(net, x1, x6) + # remove matmul2, add matmul7 def test_six_matmul_load_auto(): class NetWithLoss(nn.Cell): @@ -216,7 +216,6 @@ def test_six_matmul_load_auto(): predict = self.network(x1, x6, x7) return self.loss(predict) - class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -262,4 +261,4 @@ def test_six_matmul_load_auto(): x1 = Tensor(np.ones([32, 32]), dtype=ms.float32) x6 = Tensor(np.ones([128, 32]), dtype=ms.float32) x7 = Tensor(np.ones([32, 32]), dtype=ms.float32) - _executor.compile(net, x1, x6, x7) \ No newline at end of file + _executor.compile(net, x1, x6, x7) diff --git a/tests/ut/python/parallel/test_sum_as_loss.py b/tests/ut/python/parallel/test_sum_as_loss.py index b5dc332eba..9a498497d2 100644 --- a/tests/ut/python/parallel/test_sum_as_loss.py +++ b/tests/ut/python/parallel/test_sum_as_loss.py @@ -49,10 +49,9 @@ def test_sum_as_loss(): out = self.reduce_sum(out, (0, 1)) return out - context.set_auto_parallel_context(device_num=16, global_rank=0) strategy0 = ((4, 1), (4, 1)) - strategy1 = ((4, 1), ) + strategy1 = ((4, 1),) net = GradWrap(Net(strategy0, strategy1)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -74,10 +73,9 @@ def test_sum_as_loss2(): out = 
self.reduce_sum(out, (0, 1)) return out - context.set_auto_parallel_context(device_num=16, global_rank=0) strategy0 = ((4, 1), (4, 1)) - strategy1 = ((1, 1), ) + strategy1 = ((1, 1),) net = GradWrap(Net(strategy0, strategy1)) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_transpose.py b/tests/ut/python/parallel/test_transpose.py index 70e7ea8e25..8865a72a50 100644 --- a/tests/ut/python/parallel/test_transpose.py +++ b/tests/ut/python/parallel/test_transpose.py @@ -25,7 +25,6 @@ from tests.dataset_mock import MindData from mindspore import context - class Dataset(MindData): def __init__(self, predict, label, length=3): super(Dataset, self).__init__(size=length) @@ -73,7 +72,8 @@ def transpose_common(strategy1, strategy2): epoch_size = 2 context.reset_auto_parallel_context() - context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8, parameter_broadcast=False) + context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8, + parameter_broadcast=False) predict = Tensor(np.ones([32, 128]), dtype=ms.float32) label = Tensor(np.ones([32]), dtype=ms.int32) @@ -90,18 +90,17 @@ def transpose_common(strategy1, strategy2): def test_transpose1(): - strategy1 = ((1, 8), ) - strategy2 = ((1, 8), ) + strategy1 = ((1, 8),) + strategy2 = ((1, 8),) transpose_common(strategy1, strategy2) def test_transpose2(): - strategy1=((1, 4), ) - strategy2=((1, 8), ) + strategy1 = ((1, 4),) + strategy2 = ((1, 8),) transpose_common(strategy1, strategy2) if __name__ == '__main__': test_transpose1() test_transpose2() - diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py index c32f46a269..8925681588 100644 --- a/tests/ut/python/parallel/test_two_matmul.py +++ b/tests/ut/python/parallel/test_two_matmul.py @@ -70,7 +70,7 @@ def test_two_matmul(): x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) b = Tensor(np.ones([64, 64]), dtype=ms.float32) - + compile(net, x, y, b) diff --git a/tests/ut/python/parallel/test_two_weights_parameter.py b/tests/ut/python/parallel/test_two_weights_parameter.py index df61f3543f..e7005f654d 100644 --- a/tests/ut/python/parallel/test_two_weights_parameter.py +++ b/tests/ut/python/parallel/test_two_weights_parameter.py @@ -22,6 +22,7 @@ from mindspore.common.api import _executor from mindspore.ops import composite as C from mindspore.ops import functional as F + class NetWithLoss(nn.Cell): def __init__(self, network, strategy3): super(NetWithLoss, self).__init__() @@ -32,17 +33,19 @@ class NetWithLoss(nn.Cell): predict = self.network(x) return self.loss(predict, b)[0] + class OneStepCell(nn.Cell): def __init__(self, network): super(OneStepCell, self).__init__(auto_prefix=False) self.network = network self.weights = ParameterTuple(network.network.trainable_params()) - def construct(self, data, label): + def construct(self, data, label): weights = self.weights grads = C.grad_by_list(self.network, weights)(data, label) return grads + def test_two_weights_parameter(): class Net(nn.Cell): def __init__(self, strategy1, strategy2, weight, weight2): @@ -57,7 +60,6 @@ def test_two_weights_parameter(): out = self.matmul2(out, self.weight2) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((4, 1), (1, 2)) strategy2 = ((4, 2), (2, 1)) diff --git a/tests/ut/python/parallel/test_using_seed_for_initializer.py 
b/tests/ut/python/parallel/test_using_seed_for_initializer.py index 46ee93f605..0efea48cc9 100644 --- a/tests/ut/python/parallel/test_using_seed_for_initializer.py +++ b/tests/ut/python/parallel/test_using_seed_for_initializer.py @@ -18,7 +18,6 @@ from mindspore import Parameter import mindspore.common.initializer as init from numpy import allclose - parameter_shape = [16, 4] @@ -27,9 +26,11 @@ class ParameterNet(nn.Cell): super(ParameterNet, self).__init__() self.para_xavier_uniform = Parameter(init.initializer('xavier_uniform', parameter_shape), name="xavier_uniform") self.para_he_uniform = Parameter(init.initializer('he_uniform', parameter_shape), name="he_uniform") - self.para_xavier_uniform2 = Parameter(init.initializer(init.XavierUniform(), parameter_shape), name="xavier_uniform2") + self.para_xavier_uniform2 = Parameter(init.initializer(init.XavierUniform(), parameter_shape), + name="xavier_uniform2") self.para_he_uniform2 = Parameter(init.initializer(init.HeUniform(), parameter_shape), name="he_uniform2") - self.para_truncated_normal = Parameter(init.initializer(init.TruncatedNormal(), parameter_shape), name="truncated_normal") + self.para_truncated_normal = Parameter(init.initializer(init.TruncatedNormal(), parameter_shape), + name="truncated_normal") self.para_normal = Parameter(init.initializer(init.Normal(), parameter_shape), name="normal") self.para_uniform = Parameter(init.initializer(init.Uniform(), parameter_shape), name="uniform") diff --git a/tests/ut/python/parallel/test_virtual_dataset_3_input.py b/tests/ut/python/parallel/test_virtual_dataset_3_input.py index 484e31c21e..583dca7db3 100644 --- a/tests/ut/python/parallel/test_virtual_dataset_3_input.py +++ b/tests/ut/python/parallel/test_virtual_dataset_3_input.py @@ -64,7 +64,7 @@ def test_virtual_dataset_3_input(): strategy0 = ((2, 1), (2, 1), (2, 1)) strategy1 = ((2, 2), (2, 2)) strategy2 = ((2, 2), (2, 2)) - strategy3 = ((2, 4), ) + strategy3 = ((2, 4),) net = GradWrap(NetWithLoss(Net(strategy0, strategy1, strategy2, strategy3))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") context.set_auto_parallel_context(device_num=8, global_rank=0) @@ -74,6 +74,7 @@ def test_virtual_dataset_3_input(): net.set_auto_parallel() _executor.compile(net, x, y, b) + def test_virtualdataset_cell_3_inputs(): class Net(nn.Cell): def __init__(self, strategy0, strategy1, strategy2, strategy3): diff --git a/tests/ut/python/pipeline/infer/test_hypermap_specialize.py b/tests/ut/python/pipeline/infer/test_hypermap_specialize.py index 633e696dbe..287cc214dc 100644 --- a/tests/ut/python/pipeline/infer/test_hypermap_specialize.py +++ b/tests/ut/python/pipeline/infer/test_hypermap_specialize.py @@ -25,9 +25,11 @@ from mindspore.common.api import ms_function context.set_context(mode=context.GRAPH_MODE) + def test_hypermap_specialize_param(): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.mul = P.Mul() @@ -51,4 +53,4 @@ def test_hypermap_specialize_param(): expected_ret = (Tensor(np.full(1, 5).astype(np.int32)), Tensor(np.full(2, 5).astype(np.int32))) ret = hypermap_specialize_param() - assert(ret == (expected_ret, expected_ret)) + assert (ret == (expected_ret, expected_ret)) diff --git a/tests/ut/python/pipeline/infer/test_net_infer.py b/tests/ut/python/pipeline/infer/test_net_infer.py index e1b3a07267..003aad827d 100644 --- a/tests/ut/python/pipeline/infer/test_net_infer.py +++ b/tests/ut/python/pipeline/infer/test_net_infer.py @@ -21,6 +21,7 @@ from mindspore import Tensor 
class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal') diff --git a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py index e67f759371..3e030573f6 100644 --- a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py +++ b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py @@ -23,27 +23,36 @@ from mindspore.ops.operations import TensorAdd context.set_context(mode=context.GRAPH_MODE) grad = C.GradOperation('get_all', get_all=True, sens_param=True) + + class TensorAddNetMe(Cell): """ TensorAddNetMe definition """ + def __init__(self): super(TensorAddNetMe, self).__init__() self.relu = ReLU() self.add = TensorAdd() + def construct(self, inputA, inputB): inputA = self.relu(inputA) inputB = self.relu(inputB) x = self.add(inputA, inputB) x = self.relu(x) return x + + class GradWrap2(Cell): """ GradWrap2 definition """ + def __init__(self, network): super(GradWrap2, self).__init__() self.network = network + def construct(self, inputA, inputB, sens): gout = grad(self.network)(inputA, inputB, sens) return gout + def gen_forwarddata(inputA, inputB): """ gen_forwarddata """ net_me = TensorAddNetMe() @@ -51,6 +60,7 @@ def gen_forwarddata(inputA, inputB): output = net_me(Tensor(inputA), Tensor(inputB)) print(output) + def gen_backwarddata(inputA, inputB, inputGrad): """ gen_backwarddata """ net_me = GradWrap2(TensorAddNetMe()) @@ -58,12 +68,14 @@ def gen_backwarddata(inputA, inputB, inputGrad): output = net_me(Tensor(inputA), Tensor(inputB), Tensor(inputGrad)) print(output) + def test_scalar_tennsor_add(): """ test_scalar_tennsor_add """ inputa = np.array(32).astype(np.float32) inputb = np.random.randn(1280, 768).astype(np.float32) gen_forwarddata(inputa, inputb) + def test_scalar_tennsor_gradadd(): """ test_scalar_tennsor_gradadd """ inputa = np.array(32).astype(np.float32) diff --git a/tests/ut/python/pipeline/parse/test_celllist.py b/tests/ut/python/pipeline/parse/test_celllist.py index c20a19b43c..d2d866c7b0 100644 --- a/tests/ut/python/pipeline/parse/test_celllist.py +++ b/tests/ut/python/pipeline/parse/test_celllist.py @@ -22,6 +22,8 @@ from mindspore.nn import SequentialCell from mindspore.nn import AvgPool2d from mindspore import context from ...ut_filter import non_graph_engine + + # pylint: disable=W0212 diff --git a/tests/ut/python/pipeline/parse/test_compile.py b/tests/ut/python/pipeline/parse/test_compile.py index 812a96aef2..fe54c2bfa5 100644 --- a/tests/ut/python/pipeline/parse/test_compile.py +++ b/tests/ut/python/pipeline/parse/test_compile.py @@ -33,6 +33,7 @@ log.setLevel(level=logging.ERROR) class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal') @@ -63,6 +64,7 @@ def test_build(): # Test case 2 : test the use different args to run graph class Net2(nn.Cell): """ Net2 definition """ + def __init__(self): super(Net2, self).__init__() self.relu = nn.ReLU() diff --git a/tests/ut/python/pipeline/parse/test_cont_break.py b/tests/ut/python/pipeline/parse/test_cont_break.py index d556981a7b..23e468acac 100644 --- a/tests/ut/python/pipeline/parse/test_cont_break.py +++ b/tests/ut/python/pipeline/parse/test_cont_break.py @@ -18,6 +18,7 @@ from mindspore.nn import Cell from mindspore import Tensor, Model, context from ...ut_filter import non_graph_engine + def run_test(netclass, count): 
context.set_context(mode=context.GRAPH_MODE) net = netclass() @@ -25,12 +26,13 @@ for _ in range(count): input_np = np.random.randn(2, 3).astype(np.float32) input_ms = Tensor(input_np) - output_np = net.construct(input_np) # run python - output_ms = model.predict(input_ms) # run graph + output_np = net.construct(input_np) # run python + output_ms = model.predict(input_ms) # run graph assert np.shape(output_np) == np.shape(output_ms.asnumpy()) # Disable the equal assert because UTs in CI use a fake backend. # np.testing.assert_array_almost_equal(output_np, output_ms.asnumpy(), decimal=3) + class for_loop_with_break(Cell): def __init__(self): super().__init__() @@ -44,10 +46,12 @@ class for_loop_with_break(Cell): pass return x + @non_graph_engine def test_for_loop_with_break(): run_test(for_loop_with_break, 10) + class for_loop_with_continue(Cell): def __init__(self): super().__init__() @@ -60,10 +64,12 @@ class for_loop_with_continue(Cell): x = x * 2 return x + @non_graph_engine def test_for_loop_with_continue(): run_test(for_loop_with_continue, 10) + class for_loop_with_cont_break(Cell): def __init__(self): super().__init__() @@ -81,10 +87,12 @@ class for_loop_with_cont_break(Cell): pass return x + @non_graph_engine def test_for_loop_with_cont_break(): run_test(for_loop_with_cont_break, 10) + class for_nested_loop_with_break(Cell): def __init__(self): super().__init__() @@ -98,10 +106,12 @@ class for_nested_loop_with_break(Cell): x = x * 1.5 return x + @non_graph_engine def test_for_nested_loop_with_break(): run_test(for_nested_loop_with_break, 10) + class while_with_break(Cell): def __init__(self): super().__init__() @@ -116,10 +126,12 @@ class while_with_break(Cell): i += 1 return x + @non_graph_engine def test_while_with_break(): run_test(while_with_break, 10) + class while_with_continue(Cell): def __init__(self): super().__init__() @@ -135,10 +147,12 @@ class while_with_continue(Cell): i += 1 return x + @non_graph_engine def test_while_with_continue(): run_test(while_with_continue, 10) + class while_for_nested(Cell): def __init__(self): super().__init__() @@ -157,10 +171,12 @@ class while_for_nested(Cell): i += 1 return x + @non_graph_engine def test_while_for_nested(): run_test(while_for_nested, 10) + class pass_branch(Cell): def __init__(self): super().__init__() @@ -175,6 +191,7 @@ class pass_branch(Cell): i += 1 return x + @non_graph_engine def test_pass_branch(): run_test(pass_branch, 10) diff --git a/tests/ut/python/pipeline/parse/test_create_obj.py b/tests/ut/python/pipeline/parse/test_create_obj.py index 370445cf99..0807c601e9 100644 --- a/tests/ut/python/pipeline/parse/test_create_obj.py +++ b/tests/ut/python/pipeline/parse/test_create_obj.py @@ -36,6 +36,7 @@ log.setLevel(level=logging.ERROR) class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.softmax = nn.Softmax(0) @@ -66,6 +67,7 @@ def test_create_cell_object_on_construct(): # Test: create a CELL OR Primitive instance on construct class Net1(nn.Cell): """ Net1 definition """ + def __init__(self): super(Net1, self).__init__() self.add = P.TensorAdd() @@ -92,6 +94,7 @@ def test_create_primitive_object_on_construct(): # Test: create a CELL OR Primitive instance on construct using many parameters class NetM(nn.Cell): """ NetM definition """ + def __init__(self, name, axis): super(NetM, self).__init__() # self.relu = nn.ReLU() @@ -106,6 +109,7 @@ class NetM(nn.Cell): class NetC(nn.Cell): """ NetC definition """ + def __init__(self, tensor): super(NetC, 
self).__init__() self.tensor = tensor diff --git a/tests/ut/python/pipeline/parse/test_fix_bug.py b/tests/ut/python/pipeline/parse/test_fix_bug.py index 65b96fac0a..8d256bda58 100644 --- a/tests/ut/python/pipeline/parse/test_fix_bug.py +++ b/tests/ut/python/pipeline/parse/test_fix_bug.py @@ -22,6 +22,7 @@ from mindspore.common.api import _executor class assignment1_Net(nn.Cell): """ assignment1_Net definition """ + def __init__(self, number): super().__init__() self.number = number @@ -36,6 +37,7 @@ class assignment1_Net(nn.Cell): class assignment2_Net(nn.Cell): """ assignment2_Net definition """ + def __init__(self, number): super().__init__() self.number = number @@ -72,6 +74,7 @@ def test_ME_assignment_operator_0020(): class unsupported_method_net(nn.Cell): """ unsupported_method_net definition """ + def __init__(self): super().__init__() self.relu = nn.ReLU() diff --git a/tests/ut/python/pipeline/parse/test_for_stmt.py b/tests/ut/python/pipeline/parse/test_for_stmt.py index 2f7d2540b7..f6cce1969d 100644 --- a/tests/ut/python/pipeline/parse/test_for_stmt.py +++ b/tests/ut/python/pipeline/parse/test_for_stmt.py @@ -34,6 +34,7 @@ class Access: class access2_net(Cell): """ access2_net definition """ + def __init__(self, number, loop_count=1): super().__init__() self.number = number diff --git a/tests/ut/python/pipeline/parse/test_graph_return_const_param.py b/tests/ut/python/pipeline/parse/test_graph_return_const_param.py index d12e99b7b6..befb3ee5a4 100644 --- a/tests/ut/python/pipeline/parse/test_graph_return_const_param.py +++ b/tests/ut/python/pipeline/parse/test_graph_return_const_param.py @@ -19,6 +19,7 @@ import mindspore.nn as nn from mindspore import context import mindspore.common.dtype as mstype from mindspore.common.tensor import Tensor + context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/pipeline/parse/test_operator.py b/tests/ut/python/pipeline/parse/test_operator.py index 5356fb176f..8dab3296f3 100644 --- a/tests/ut/python/pipeline/parse/test_operator.py +++ b/tests/ut/python/pipeline/parse/test_operator.py @@ -23,6 +23,7 @@ from ...ut_filter import non_graph_engine class arithmetic_Net(Cell): """ arithmetic_Net definition """ + def __init__(self, symbol, loop_count=(1, 3)): super().__init__() self.symbol = symbol @@ -68,6 +69,7 @@ class arithmetic_Net(Cell): class logical_Net(Cell): """ logical_Net definition """ + def __init__(self, symbol, loop_count=(1, 3)): super().__init__() self.symbol = symbol diff --git a/tests/ut/python/pipeline/parse/test_parse.py b/tests/ut/python/pipeline/parse/test_parse.py index 03e1dc2ffc..114ae0c149 100644 --- a/tests/ut/python/pipeline/parse/test_parse.py +++ b/tests/ut/python/pipeline/parse/test_parse.py @@ -28,6 +28,7 @@ from mindspore.common.api import ms_function, _executor from mindspore.ops.composite import core from mindspore.ops.functional import tensor_add from ...ut_filter import non_graph_engine + # pylint: disable=W0613 # W0613: unused-argument @@ -35,9 +36,11 @@ from ...ut_filter import non_graph_engine log = logging.getLogger("test") log.setLevel(level=logging.ERROR) + # Test case: use the parse obj interface with default parameters class Net(nn.Cell): """ Net definition """ + def __init__(self, dim): super(Net, self).__init__() self.softmax1 = nn.Softmax(dim) @@ -64,10 +67,10 @@ def test_parse_defalut_parameter_case2(): log.debug("output value = %r", value) - # Test case: use the variable parameter for parse object class Net1(nn.Cell): """ Net1 definition """ + def __init__(self): super(Net1, 
self).__init__() @@ -93,10 +96,10 @@ def test_var_parameter_case2(): _executor.compile(net, input_data, input1, input2) - # Test case: test the global flag g_x = Tensor(np.ones([3, 3]).astype(np.float32)) + @ms_function def tensor_add_global(x): """ tensor_add_global """ @@ -116,6 +119,7 @@ def test_global_flag(): class NetWithNDarray(nn.Cell): """ NetWithNDarray definition """ + def __init__(self, dim): super(NetWithNDarray, self).__init__() self.softmax = nn.Softmax(dim) @@ -124,6 +128,7 @@ class NetWithNDarray(nn.Cell): def construct(self, input_data): return self.softmax(input_data) * self.x + @non_graph_engine def test_net_with_ndarray(): """ test_net_with_ndarray """ diff --git a/tests/ut/python/pipeline/parse/test_serialize.py b/tests/ut/python/pipeline/parse/test_serialize.py index 7dc9985e93..db76b4e934 100644 --- a/tests/ut/python/pipeline/parse/test_serialize.py +++ b/tests/ut/python/pipeline/parse/test_serialize.py @@ -21,6 +21,7 @@ import os from mindspore._extends.parse import dump_obj from mindspore._extends.parse import load_obj + def test_load_dump(): data = (1, 3, 2, 7, 9) file_name = dump_obj(data, "./") diff --git a/tests/ut/python/pynative_mode/__init__.py b/tests/ut/python/pynative_mode/__init__.py index 4317e06379..5b5a07669b 100644 --- a/tests/ut/python/pynative_mode/__init__.py +++ b/tests/ut/python/pynative_mode/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" from mindspore import context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.PYNATIVE_MODE) diff --git a/tests/ut/python/pynative_mode/engine/test_cell_wrapper.py b/tests/ut/python/pynative_mode/engine/test_cell_wrapper.py index 396bd28ffa..c1f43f3e7f 100644 --- a/tests/ut/python/pynative_mode/engine/test_cell_wrapper.py +++ b/tests/ut/python/pynative_mode/engine/test_cell_wrapper.py @@ -24,6 +24,7 @@ from ...ut_filter import non_graph_engine class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") diff --git a/tests/ut/python/pynative_mode/ge/model/test_lenet_model.py b/tests/ut/python/pynative_mode/ge/model/test_lenet_model.py index 1007d0d392..a34241d791 100644 --- a/tests/ut/python/pynative_mode/ge/model/test_lenet_model.py +++ b/tests/ut/python/pynative_mode/ge/model/test_lenet_model.py @@ -25,6 +25,7 @@ from ....ut_filter import non_graph_engine class LeNet5(nn.Cell): """ LeNet5 definition """ + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') diff --git a/tests/ut/python/pynative_mode/ge/ops/test_batchnorm.py b/tests/ut/python/pynative_mode/ge/ops/test_batchnorm.py index 029092e26b..f2f06225e8 100644 --- a/tests/ut/python/pynative_mode/ge/ops/test_batchnorm.py +++ b/tests/ut/python/pynative_mode/ge/ops/test_batchnorm.py @@ -23,10 +23,10 @@ from ....ut_filter import non_graph_engine @non_graph_engine def test_bn2d(): """ut of nn.BatchNorm2d""" - gamma = Tensor(np.random.randn(64).astype(np.float32)*0.01) - beta = Tensor(np.random.randn(64).astype(np.float32)*0.01) - moving_mean = Tensor(np.random.randn(64).astype(np.float32)*0.01) - moving_var = Tensor(np.random.randn(64).astype(np.float32)*0.01) + gamma = Tensor(np.random.randn(64).astype(np.float32) * 0.01) + beta = Tensor(np.random.randn(64).astype(np.float32) * 0.01) + moving_mean = Tensor(np.random.randn(64).astype(np.float32) * 0.01) + moving_var = Tensor(np.random.randn(64).astype(np.float32) * 0.01) bn = 
nn.BatchNorm2d(num_features=64, eps=1e-5, @@ -36,7 +36,7 @@ def test_bn2d(): moving_mean_init=moving_mean, moving_var_init=moving_var) - #3-channel RGB + # 3-channel RGB input_data = Tensor(np.random.randint(0, 10, [1, 64, 56, 56]).astype(np.float32)) # for test in infer lib output = bn.construct(input_data) diff --git a/tests/ut/python/pynative_mode/ge/ops/test_conv.py b/tests/ut/python/pynative_mode/ge/ops/test_conv.py index dede9c3159..6189bd20ee 100644 --- a/tests/ut/python/pynative_mode/ge/ops/test_conv.py +++ b/tests/ut/python/pynative_mode/ge/ops/test_conv.py @@ -19,7 +19,6 @@ import mindspore.nn as nn from mindspore import Tensor from ....ut_filter import non_graph_engine - we = Tensor(np.ones([2, 2])) in_channels = 3 out_channels = 64 @@ -29,8 +28,10 @@ ks = 3 def get_me_conv_output(input_data, weight, in_channel, out_channel, kernel_size, stride=1, padding=0, has_bias=False, bias=None): """ get_me_conv_output """ + class Net(nn.Cell): """ Net definition """ + def __init__(self, weight, in_channel, out_channel, kernel_size, stride=1, padding=0, has_bias=False, bias=None): super(Net, self).__init__() @@ -45,6 +46,7 @@ def get_me_conv_output(input_data, weight, in_channel, out_channel, kernel_size, def construct(self, input_x): return self.conv(input_x) + net = Net(weight, in_channel, out_channel, kernel_size, stride, padding, has_bias, bias) out = net.construct(input_data) return out.asnumpy() diff --git a/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py b/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py index 0ea463c848..0ea8ff7250 100644 --- a/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py +++ b/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py @@ -31,6 +31,7 @@ def test_tensor_add(): z = tensor_add(x, y) assert np.all(z.asnumpy() - (x.asnumpy() + y.asnumpy()) < 0.0001) + def test_tensor_orign_ops(): x = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32)) y = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32)) diff --git a/tests/ut/python/pynative_mode/nn/test_batchnorm.py b/tests/ut/python/pynative_mode/nn/test_batchnorm.py index f0fbcfd2b3..08f84f2fe3 100644 --- a/tests/ut/python/pynative_mode/nn/test_batchnorm.py +++ b/tests/ut/python/pynative_mode/nn/test_batchnorm.py @@ -57,7 +57,7 @@ def test_bn2d(): moving_mean_init=moving_mean, moving_var_init=moving_var) - #3-channel RGB + # 3-channel RGB input_data = Tensor(np.random.randint(0, 1, [1, 3, 224, 224]).astype(np.float32)) output = bn(input_data) output_np = output.asnumpy() diff --git a/tests/ut/python/pynative_mode/nn/test_cell.py b/tests/ut/python/pynative_mode/nn/test_cell.py index be56b99cd8..3894c9c60e 100644 --- a/tests/ut/python/pynative_mode/nn/test_cell.py +++ b/tests/ut/python/pynative_mode/nn/test_cell.py @@ -22,6 +22,7 @@ from ...ut_filter import non_graph_engine class ModA(nn.Cell): """ ModA definition """ + def __init__(self, tensor): super(ModA, self).__init__() self.weight = Parameter(tensor, name="weight") @@ -32,6 +33,7 @@ class ModA(nn.Cell): class ModB(nn.Cell): """ ModB definition """ + def __init__(self, tensor): super(ModB, self).__init__() self.weight = Parameter(tensor, name="weight") @@ -42,6 +44,7 @@ class ModB(nn.Cell): class ModC(nn.Cell): """ ModC definition """ + def __init__(self, ta, tb): super(ModC, self).__init__() self.mod1 = ModA(ta) @@ -68,6 +71,7 @@ class Net(nn.Cell): class Net2(nn.Cell): """ Net2 definition """ + def __init__(self, ta, tb): super(Net2, self).__init__(auto_prefix=False) self.mod1 = ModA(ta) @@ -92,7 +96,7 @@ class ConvNet(nn.Cell): self.maxpool 
= nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.flatten = nn.Flatten() self.fc = nn.Dense( - int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)), + int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)), num_classes) def construct(self, x): @@ -165,6 +169,7 @@ def test_exceptions(): class ModError(nn.Cell): """ ModError definition """ + def __init__(self, tensor): self.weight = Parameter(tensor, name="weight") super(ModError, self).__init__() @@ -177,6 +182,7 @@ def test_exceptions(): class ModError1(nn.Cell): """ ModError1 definition """ + def __init__(self, tensor): super().__init__() self.weight = Parameter(tensor, name="weight") @@ -191,6 +197,7 @@ def test_exceptions(): class ModError2(nn.Cell): """ ModError2 definition """ + def __init__(self, tensor): super().__init__() self.mod = ModA(tensor) @@ -257,6 +264,7 @@ def test_add_attr(): class ModAddCellError(nn.Cell): """ ModAddCellError definition """ + def __init__(self, tensor): self.mod = ModA(tensor) super().__init__() diff --git a/tests/ut/python/pynative_mode/nn/test_checkparameter.py b/tests/ut/python/pynative_mode/nn/test_checkparameter.py index ceebfcf713..a66d320217 100644 --- a/tests/ut/python/pynative_mode/nn/test_checkparameter.py +++ b/tests/ut/python/pynative_mode/nn/test_checkparameter.py @@ -18,7 +18,6 @@ import pytest from mindspore._checkparam import check_int, check_int_positive, \ check_bool, check_input_format, _expand_tuple - once = _expand_tuple(1) twice = _expand_tuple(2) triple = _expand_tuple(3) diff --git a/tests/ut/python/pynative_mode/nn/test_container.py b/tests/ut/python/pynative_mode/nn/test_container.py index 7438ed2613..8b2e537b5c 100644 --- a/tests/ut/python/pynative_mode/nn/test_container.py +++ b/tests/ut/python/pynative_mode/nn/test_container.py @@ -20,7 +20,6 @@ import pytest import mindspore.nn as nn from mindspore import Tensor - weight = Tensor(np.ones([2, 2])) conv2 = nn.Conv2d(3, 64, (3, 3), stride=2, padding=0) diff --git a/tests/ut/python/pynative_mode/nn/test_conv.py b/tests/ut/python/pynative_mode/nn/test_conv.py index 25f75f64c4..b4fe5ba038 100644 --- a/tests/ut/python/pynative_mode/nn/test_conv.py +++ b/tests/ut/python/pynative_mode/nn/test_conv.py @@ -18,7 +18,6 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor - weight = Tensor(np.ones([2, 2])) in_channels = 3 out_channels = 64 diff --git a/tests/ut/python/pynative_mode/nn/test_dense.py b/tests/ut/python/pynative_mode/nn/test_dense.py index de7f4df7b8..97f4ea6da3 100644 --- a/tests/ut/python/pynative_mode/nn/test_dense.py +++ b/tests/ut/python/pynative_mode/nn/test_dense.py @@ -18,6 +18,8 @@ import pytest import mindspore.nn as nn from mindspore import Tensor + + # pylint: disable=E1123 @@ -35,7 +37,7 @@ def test_dense_defaultbias_noactivation(): def test_dense_defaultweight(): bias = Tensor(np.array([0.5, 0.3], dtype=np.float32)) dense = nn.Dense(3, 2, bias_init=bias) - #batch_size 1 && 3-channel RGB + # batch_size 1 && 3-channel RGB input_data = Tensor(np.random.randint(0, 255, [1, 3]).astype(np.float32)) output = dense(input_data) output_np = output.asnumpy() diff --git a/tests/ut/python/pynative_mode/nn/test_dropout.py b/tests/ut/python/pynative_mode/nn/test_dropout.py index cea530db41..39ccdc75e2 100644 --- a/tests/ut/python/pynative_mode/nn/test_dropout.py +++ b/tests/ut/python/pynative_mode/nn/test_dropout.py @@ -22,6 +22,7 @@ from mindspore import context context.set_context(device_target="Ascend") + def test_check_dropout_1(): x = Tensor(np.ones([20, 16, 50]), 
mstype.float32) m = nn.Dropout(0.8) diff --git a/tests/ut/python/pynative_mode/nn/test_loss.py b/tests/ut/python/pynative_mode/nn/test_loss.py index a130a6eca7..2a52f26012 100644 --- a/tests/ut/python/pynative_mode/nn/test_loss.py +++ b/tests/ut/python/pynative_mode/nn/test_loss.py @@ -28,6 +28,7 @@ def test_L1Loss(): with pytest.raises(NotImplementedError): loss.construct(input_data, target_data) + @non_graph_engine def test_SoftmaxCrossEntropyWithLogits(): """ test_SoftmaxCrossEntropyWithLogits """ diff --git a/tests/ut/python/pynative_mode/nn/test_pooling.py b/tests/ut/python/pynative_mode/nn/test_pooling.py index 881685d8b6..3c98980101 100644 --- a/tests/ut/python/pynative_mode/nn/test_pooling.py +++ b/tests/ut/python/pynative_mode/nn/test_pooling.py @@ -21,6 +21,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor + def test_avgpool2d(): """ test_avgpool2d """ kernel_size = 3 @@ -28,7 +29,7 @@ def test_avgpool2d(): avg_pool = nn.AvgPool2d(kernel_size, stride) assert avg_pool.kernel_size == 3 assert avg_pool.stride == 2 - input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6])*0.1) + input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6]) * 0.1) output = avg_pool(input_data) output_np = output.asnumpy() assert isinstance(output_np[0][0][0][0], (np.float32, np.float64)) @@ -42,9 +43,6 @@ def test_avgpool2d_error_input(): nn.AvgPool2d(kernel_size, stride) - - - def test_maxpool2d(): """ test_maxpool2d """ kernel_size = 3 @@ -53,7 +51,7 @@ def test_maxpool2d(): max_pool = nn.MaxPool2d(kernel_size, stride, pad_mode='SAME') assert max_pool.kernel_size == 3 assert max_pool.stride == 3 - input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6])*0.1) + input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6]) * 0.1) output = max_pool(input_data) output_np = output.asnumpy() assert isinstance(output_np[0][0][0][0], (np.float32, np.float64)) diff --git a/tests/ut/python/pynative_mode/ops/test_hypermap.py b/tests/ut/python/pynative_mode/ops/test_hypermap.py index 141490da8a..15f11fa778 100644 --- a/tests/ut/python/pynative_mode/ops/test_hypermap.py +++ b/tests/ut/python/pynative_mode/ops/test_hypermap.py @@ -22,6 +22,7 @@ from mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore.ops import functional as F from ...ut_filter import non_graph_engine + # pylint: disable=W0613 # W0613: unused-argument diff --git a/tests/ut/python/pynative_mode/ops/test_multitype.py b/tests/ut/python/pynative_mode/ops/test_multitype.py index 58fd31256d..106d0a6f61 100644 --- a/tests/ut/python/pynative_mode/ops/test_multitype.py +++ b/tests/ut/python/pynative_mode/ops/test_multitype.py @@ -23,7 +23,6 @@ from mindspore.ops import operations as P from mindspore import Tensor from ...ut_filter import non_graph_engine - tensor_add = P.TensorAdd() op_add = P.AddN() scala_add = Primitive('scalar_add') diff --git a/tests/ut/python/pynative_mode/test_bprop.py b/tests/ut/python/pynative_mode/test_bprop.py index dfe84f8173..2cba70334d 100644 --- a/tests/ut/python/pynative_mode/test_bprop.py +++ b/tests/ut/python/pynative_mode/test_bprop.py @@ -22,11 +22,14 @@ from mindspore.common import Tensor from mindspore.common.api import ms_function from ....mindspore_test_framework.utils.bprop_util import bprop + def setup_module(module): context.set_context(mode=context.PYNATIVE_MODE) + class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.matmul = P.MatMul() @@ -38,23 +41,27 @@ class Net(nn.Cell): out = self.matmul(x, y) 
return x, out + def test_bprop_no_sens(): grads = bprop(Net(), Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), wrt=['inputs']) print(grads) + def test_bprop_sens(): grads = bprop(Net(), Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), grads_wrt_outputs=(Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))), wrt=['inputs']) print(grads) + def test_bprop_first_only(): grads = bprop(Net(), Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), grads_wrt_outputs=(Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32)))) print(grads) + def test_bprop_wrt_params(): net = Net() grads = bprop(net, Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), @@ -64,6 +71,7 @@ def test_bprop_wrt_params(): params=net.trainable_params()) print(grads) + def test_bprop_wrt_params_no_sens(): net = Net() grads = bprop(net, Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), @@ -71,6 +79,7 @@ def test_bprop_wrt_params_no_sens(): params=net.trainable_params()) print(grads) + def test_bprop_wrt_inputs_and_params(): net = Net() grads = bprop(net, Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32)), diff --git a/tests/ut/python/pynative_mode/test_cell_bprop.py b/tests/ut/python/pynative_mode/test_cell_bprop.py index a0e50b9959..15a1d0b1eb 100644 --- a/tests/ut/python/pynative_mode/test_cell_bprop.py +++ b/tests/ut/python/pynative_mode/test_cell_bprop.py @@ -42,6 +42,7 @@ class MulAdd(nn.Cell): # In this test case, the user-defined bprop is purposely defined wrong to distinguish it from the ad result return 2 * dout, 2 * y + def test_grad_mul_add(): mul_add = MulAdd() assert C.grad_all(mul_add)(1, 2) == (2, 4) @@ -56,6 +57,7 @@ class InlineMulADD(nn.Cell): def construct(self, x, y): return self.mul_add(x, y) + x + self.param * y + def test_grad_inline_mul_add(): inline_mul_add = InlineMulADD() assert C.grad_all(inline_mul_add)(1, 2) == (3, 6) @@ -74,11 +76,13 @@ class WithParameter(nn.Cell): # In this test case, the user-defined bprop is purposely defined wrong to distinguish it from the ad result return self.param1 * self.param2 * dout, 2 * y + def test_with_param(): with_param = WithParameter() with pytest.raises(RuntimeError): C.grad_all(with_param)(1, 2) + class WithNoBprop(nn.Cell): def __init__(self): super(WithNoBprop, self).__init__() @@ -86,107 +90,138 @@ class WithNoBprop(nn.Cell): def construct(self, x, y): return 2 * x + y + def test_with_no_bprop(): with_no_bprop = WithNoBprop() assert C.grad_all(with_no_bprop)(1, 2) == (2, 1) + def test_grad_in_bprop_1(): class GradInBprop_1(nn.Cell): def __init__(self): super(GradInBprop_1, self).__init__() self.relu = P.ReLU() + def construct(self, x, y): return self.relu(x) + class GradInBprop_2(nn.Cell): def __init__(self): super(GradInBprop_2, self).__init__() self.f = GradInBprop_1() + def construct(self, x, y): return self.f(x, y), C.grad_all(self.f)(x, y) + def bprop(self, x, y, out, dout): grads = C.grad_all(self.f)(x, y) return out[1][0], grads[1] + class GradInBprop_3(nn.Cell): def __init__(self): super(GradInBprop_3, self).__init__() self.f = GradInBprop_2() + def construct(self, x, y): return self.f(x, y) + grad_in_bprop = GradInBprop_3() grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))) assert (grads[0].asnumpy() == 
np.ones([2, 2]).astype(np.float32)).all() assert (grads[1].asnumpy() == np.zeros([2, 2]).astype(np.float32)).all() + def test_grad_in_bprop_2(): class GradInBprop_1(nn.Cell): def __init__(self): super(GradInBprop_1, self).__init__() self.relu = P.ReLU() + def construct(self, x, y): return self.relu(x) + def bprop(self, x, y, out, dout): return x * y, y + x + class GradInBprop_2(nn.Cell): def __init__(self): super(GradInBprop_2, self).__init__() self.f = GradInBprop_1() + def construct(self, x, y): return self.f(x, y), C.grad_all(self.f)(x, y) + def bprop(self, x, y, out, dout): grads = C.grad_all(self.f)(x, y) return out[1][0], grads[1] + class GradInBprop_3(nn.Cell): def __init__(self): super(GradInBprop_3, self).__init__() self.f = GradInBprop_2() + def construct(self, x, y): return self.f(x, y) + grad_in_bprop = GradInBprop_3() grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))) assert (grads[0].asnumpy() == np.ones([2, 2]).astype(np.float32)).all() assert (grads[1].asnumpy() == np.array([[2, 2], [2, 2]]).astype(np.float32)).all() + def test_grad_in_bprop_3(): class GradInBprop_1(nn.Cell): def __init__(self): super(GradInBprop_1, self).__init__() self.relu = P.ReLU() + def construct(self, x, y): return self.relu(x) + class GradInBprop_2(nn.Cell): def __init__(self): super(GradInBprop_2, self).__init__() self.f = GradInBprop_1() + def construct(self, x, y): return self.f(x, y), C.grad_all(self.f)(x, y) + def bprop(self, x, y, out, dout): grads = C.grad_all(self.f)(x, y) return out[1][0], grads[1] + class GradInBprop_3(nn.Cell): def __init__(self): super(GradInBprop_3, self).__init__() self.f = GradInBprop_2() + def construct(self, x, y): return self.f(x, y) + def bprop(self, x, y, out, dout): return x + y + y + out[0], x + x + y + y + dout[0] + grad_in_bprop = GradInBprop_3() grads = C.grad_all(grad_in_bprop)(Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))) assert (grads[0].asnumpy() == np.array([[4, 4], [4, 4]]).astype(np.float32)).all() assert (grads[1].asnumpy() == np.array([[5, 5], [5, 5]]).astype(np.float32)).all() + class OneInputBprop(nn.Cell): def __init__(self): super().__init__() self.op = P.ReLU() + def construct(self, x): return self.op(x) + def bprop(self, x, out, dout): return 5 * x, + def test_grad_one_input_bprop(): net = OneInputBprop() input1 = Tensor(np.ones([2, 2]).astype(np.float32)) @@ -197,19 +232,24 @@ def test_grad_one_input_bprop(): class TwoInput(nn.Cell): def __init__(self): super().__init__() + def construct(self, x, y): return x * y + class InlineBpropTwoInput(nn.Cell): def __init__(self): super().__init__() self.f = TwoInput() + def construct(self, x, y): return self.f(x, y), C.grad_all(self.f)(x, y) + def bprop(self, x, y, out, dout): grads = C.grad_all(self.f)(x, y) return grads[0] * 2, grads[1] * 2 + def test_grad_inline_bprop_two_input(): net = InlineBpropTwoInput() input1 = Tensor(np.ones([2, 2]).astype(np.float32)) @@ -224,8 +264,10 @@ class TwoInputBprop(nn.Cell): def __init__(self): super().__init__() self.op = P.Mul() + def construct(self, x, y): return self.op(x, y) + def bprop(self, x, y, out, dout): return 5 * x, 8 * y @@ -234,20 +276,25 @@ class TwoInputWithParameter(nn.Cell): def __init__(self): super().__init__() self.op = P.Mul() - self.inputdata = Parameter(initializer(1, (2,2), mstype.float32),name="global_step") + self.inputdata = Parameter(initializer(1, (2, 2), mstype.float32), name="global_step") + def construct(self, x, y): x = 
self.inputdata + x - return self.op(x, y) + return self.op(x, y) + class TwoInputWithOnlyInitParameterBprop(nn.Cell): def __init__(self): super().__init__() self.op = P.Mul() - self.inputdata = Parameter(initializer(1, (2,2), mstype.float32),name="global_step") + self.inputdata = Parameter(initializer(1, (2, 2), mstype.float32), name="global_step") + def construct(self, x, y): - return self.op(x, y) + return self.op(x, y) + def bprop(self, x, y, out, dout): - return 5*x, 8*y + return 5 * x, 8 * y + class InlineMutilTwoInputParameterCell(nn.Cell): def __init__(self): @@ -256,10 +303,12 @@ class InlineMutilTwoInputParameterCell(nn.Cell): self.f2 = TwoInput() self.f3 = TwoInputWithParameter() self.f4 = TwoInputWithOnlyInitParameterBprop() + def construct(self, x, y): - output = self.f1(x,y)+self.f2(x,y)+self.f3(x,y)+self.f4(x,y) + output = self.f1(x, y) + self.f2(x, y) + self.f3(x, y) + self.f4(x, y) return output + def test_grad_inline_bprop_multi_input(): net = InlineMutilTwoInputParameterCell() input1 = Tensor(np.ones([2, 2]).astype(np.float32)) @@ -270,11 +319,13 @@ def test_grad_inline_bprop_multi_input(): assert (grads[1].asnumpy() == np.array([[19, 19], [19, 19]]).astype(np.float32)).all() assert (len(grads) == 2) + class MulAddWithParam(nn.Cell): def __init__(self): super(MulAddWithParam, self).__init__() self.mul_add = MulAdd() self.param = Parameter(Tensor(np.array([[3, 2]], np.float32)), 'param') + def construct(self, x): return self.mul_add(self.param, x) @@ -293,24 +344,31 @@ def test_refkey_bprop(): class MulAddWithWrongOutputNum(nn.Cell): def __init__(self): super(MulAddWithWrongOutputNum, self).__init__() + def construct(self, x, y): return 2 * x + y + def bprop(self, x, y, out, dout): return 2 * dout, + def test_grad_mul_add_with_wrong_output_num(): mul_add = MulAddWithWrongOutputNum() with pytest.raises(TypeError): C.grad_all(mul_add)(1, 2) + class MulAddWithWrongOutputType(nn.Cell): def __init__(self): super(MulAddWithWrongOutputType, self).__init__() + def construct(self, x, y): return 2 * x + y + def bprop(self, x, y, out, dout): return 2 * dout, 2 + def test_grad_mul_add_with_wrong_output_type(): mul_add = MulAddWithWrongOutputType() with pytest.raises(TypeError): @@ -320,12 +378,15 @@ def test_grad_mul_add_with_wrong_output_type(): class MulAddWithWrongOutputShape(nn.Cell): def __init__(self): super(MulAddWithWrongOutputShape, self).__init__() - self.ones = Tensor(np.ones([2,])) + self.ones = Tensor(np.ones([2, ])) + def construct(self, x, y): return 2 * x + y + def bprop(self, x, y, out, dout): return 2, self.ones + def test_grad_mul_add_with_wrong_output_shape(): mul_add = MulAddWithWrongOutputShape() with pytest.raises(TypeError): diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py index 1b9c13646a..d2b8cc8e22 100644 --- a/tests/ut/python/pynative_mode/test_framstruct.py +++ b/tests/ut/python/pynative_mode/test_framstruct.py @@ -33,6 +33,7 @@ from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer def setup_module(module): context.set_context(mode=context.PYNATIVE_MODE) + @ms_function def while_upper_bound(upper): rval = 2 @@ -40,10 +41,12 @@ def while_upper_bound(upper): rval = rval * rval return rval + def test_while_upper_bound(): res = while_upper_bound(10) assert res == 16 + @ms_function def while_lower_bound(lower): """ t_while """ @@ -52,10 +55,12 @@ def while_lower_bound(lower): rval = rval * rval return rval + def test_while_lower_bound(): res = while_lower_bound(2) assert res == 
256 + @ms_function def dynamic_make_tuple(x, lower, upper): out = () @@ -65,11 +70,13 @@ def dynamic_make_tuple(x, lower, upper): i = i + 1 return out + def test_dynamic_make_tuple(): # Dynamicly recursively creating static type is invalid in mindspore, as mindspore is a static language. with pytest.raises(RuntimeError): dynamic_make_tuple(2, 1, 5) + def test_make_tuple(): # Staticly recursively creating static type is valid in mindspore. @ms_function @@ -78,35 +85,43 @@ def test_make_tuple(): for i in range(3): out = out + (x,) return out + res = make_tuple(5) assert res == (5, 5, 5) + @ms_function def add(x, y): """ add """ return x + y + def mul(x, y): """ mul """ return x * y + def add_mul(x, y): """ add_mul """ return (x + y) * y + def mainf(x, y): """ mainf """ return C.grad_all(mul)(x, y) + def grad_add_mul(x, y): """ grad_add_mul """ return C.grad_all(add_mul)(x, y) + @ms_function def sub(x, y): """ sub """ return x - y + @ms_function def if_always_true(x): """ if_always_true """ @@ -115,6 +130,7 @@ def if_always_true(x): else: return 0 + def test_add(): """ test_add """ res = add(2.5, 3) @@ -140,28 +156,33 @@ def test_f(): res = mainf(3, 2) assert res == (2, 3) + @non_graph_engine def test_grad_add_mul(): """ test_grad_add_mul """ res = grad_add_mul(3, 2) assert res == (2, 7) + def f(x): if x > 0: - return f(x-1) + return f(x - 1) return x + @ms_function def list_subscript(): """ list_subscript """ - x= [1, 2, 3] + x = [1, 2, 3] return x[0] * x[1] + def test_list_subscript(): """ test_list_subscript """ res = list_subscript() assert res == 2 + @ms_function def ms_infer_for(xs, y): """ ms_infer_for """ @@ -170,6 +191,7 @@ def ms_infer_for(xs, y): rval = rval + x return rval + def test_infer_for(): """ test_infer_for """ t = (1, 2, 3) @@ -177,23 +199,26 @@ def test_infer_for(): res = ms_infer_for(t, y) assert res == 10 + @ms_function def if_construct(a, b): z = a if a > b: - z = a+b + z = a + b else: - z = a*b + z = a * b if z > b: - return z-a + return z - a else: - return a-b + return a - b + def test_if_construct(): """ test_if_construct """ res = if_construct(3, 6) assert res == 15 + @ms_function def if_scalar(a, b): """ if_abstract """ @@ -201,51 +226,59 @@ def if_scalar(a, b): return a return b + def test_if_scalar1(): """ test_if_abstract """ res = if_scalar(3, 6) assert res == 3 + def test_if_scalar2(): """ test_if_abstract """ res = if_scalar(0, 6) assert res == 6 + @ms_function def if_tensor(a, b): c = a if a < b: - c = a+a + c = a + a if c < b: - c = a+c + c = a + c else: - c = a+b + c = a + b else: - c = b+b + c = b + b out = c + c return out + def test_if_tensor(): res = if_tensor(Tensor(np.ones([64, 10]).astype(np.int32)), Tensor(np.ones([64, 10]).astype(np.int32))) assert res == Tensor(np.ones([64, 10]).astype(np.int32) * 4) + @ms_function def rec(x): """ rec """ if x > 0: - return rec(x-1) + return rec(x - 1) return x + def test_grad_rec(): """ test_grad_rec """ res = C.grad(rec)(10) assert res == 1 + def test_me_rec(): """ test_me_rec """ res = rec(10) assert res == 0 + @ms_function def t2_while(x, y): out = y - x @@ -255,28 +288,34 @@ def t2_while(x, y): i = i + 1 return out + def test_while2(): res = t2_while(2, 3) assert res == 6 + def test_grad_while2(): res = C.grad(t2_while)(2, 3) assert res == 3 + def if_test(a, b): """ if_test """ if a > b: return 3 * a return 2 * b + def grad_if(x, y): """ grad_if """ return C.grad_all(if_test)(x, y) + def test_grad_if(): """ test_grad_if """ assert grad_if(5, 4) == (3, 0) + # While loop is not unrolled in forward and 
backward graphs. def test_dont_unroll_while(): def dont_unroll_while(x, y): @@ -286,35 +325,40 @@ def test_dont_unroll_while(): out = mul(x, y) i = i + 1 return out + @ms_function() def invoke_while(x, y): return C.grad(dont_unroll_while)(x, y) + res = invoke_while(2, 3) assert res == 3 + class ConvNet(nn.Cell): def __init__(self): super(ConvNet, self).__init__() out_channel = 16 kernel_size = 3 self.conv = P.Conv2D(out_channel, - kernel_size, - mode=1, - pad_mode="pad", - pad=0, - stride=1, - dilation=2, - group=1) + kernel_size, + mode=1, + pad_mode="pad", + pad=0, + stride=1, + dilation=2, + group=1) self.w = Parameter(Tensor(np.ones([16, 16, 3, 3]).astype(np.float32)), name='w') def construct(self, x): return self.conv(x, self.w) + conv = ConvNet() c1 = Tensor([2], mstype.float32) c2 = Tensor([10], mstype.float32) c3 = Tensor([1], mstype.float32) + @ms_function def t1_while(x, y, z): out = x @@ -325,12 +369,14 @@ def t1_while(x, y, z): out = out + out return out + def test_while_net(): - y = Tensor(np.ones([1,3,3,4]).astype(np.float32)) - x = Tensor(np.ones([1,16,12,12]).astype(np.float32)) - z = Tensor(np.ones([1,16,16,16]).astype(np.float32)) + y = Tensor(np.ones([1, 3, 3, 4]).astype(np.float32)) + x = Tensor(np.ones([1, 16, 12, 12]).astype(np.float32)) + z = Tensor(np.ones([1, 16, 16, 16]).astype(np.float32)) res = t1_while(x, y, z) - assert res == Tensor(np.ones([1,16,12,12]).astype(np.float32) * 2306.0) + assert res == Tensor(np.ones([1, 16, 12, 12]).astype(np.float32) * 2306.0) + @ms_function def if_while(a, b, x, z): @@ -338,21 +384,23 @@ def if_while(a, b, x, z): i = c1 out = x if a < b: - c = a+a + c = a + a while i < c2: out = out + conv(z) i = i + c3 else: - c = b+b + c = b + b out = c + c return out + def test_if_while(): - x = Tensor(np.random.randn(1,16,12,12).astype(np.float32)) - z = Tensor(np.random.randn(1,16,16,16).astype(np.float32)) + x = Tensor(np.random.randn(1, 16, 12, 12).astype(np.float32)) + z = Tensor(np.random.randn(1, 16, 16, 16).astype(np.float32)) res = if_while(Tensor(np.ones([64, 10]).astype(np.float32)), Tensor(np.ones([64, 10]).astype(np.float32)), x, z) assert res == Tensor(np.ones([64, 10]).astype(np.float32) * 4.0) + def _while(x): """ _while """ ret = x * x @@ -362,70 +410,85 @@ def _while(x): i = i + 1 return ret + def grad_while(x): """ grad_while """ return C.grad_all(_while)(x) + def test_grad_while(): """ test_grad_while """ assert grad_while(5) == (60,) + @ms_function def factorial(n): """ factorial """ if n == 0: return 1 - return n * factorial(n-1) + return n * factorial(n - 1) + def test_factorial(): res = factorial(3) assert res == 6 + def test_grad_factorial(): res = C.grad(factorial)(3) assert res == 11 + @ms_function def factorial2(n): """ factorial """ if n != 0: - return n * factorial2(n-1) + return n * factorial2(n - 1) elif n == 1: - return 1 * factorial2(n-1) + return 1 * factorial2(n - 1) else: return 1 + + def test_factorial2(): res = factorial2(3) assert res == 6 + @ms_function def foo(n): if n <= 1: if n == 1: - return foo(n-1) + return foo(n - 1) else: return 1 else: - return foo(n-1) + return foo(n - 1) + + def test_foo(): res = foo(5) assert res == 1 + @ms_function def double_nested_loop(x): i = 0 s = 0 - while(i < x): + while (i < x): j = 0 i = i + 1 - while(j < 3): + while (j < 3): j = j + 1 s = s + j return s + + def test_nested_loop(): res = double_nested_loop(3) assert res == 18 + @ms_function def double_nested_loop2(x): s = 0 @@ -433,10 +496,13 @@ def double_nested_loop2(x): for j in range(3): s = s + j return s + + 
def test_nested_loop2(): res = double_nested_loop(1) assert res == 6 + def _for(x): """ _for """ ret = x * x @@ -444,57 +510,73 @@ def _for(x): ret = ret * i return ret + def grad_for(x): """ grad_for """ return C.grad_all(_for)(x) + def test_grad_for(): """ test_grad_for """ assert grad_for(5) == (60,) + @ms_function def try_tail(x): """ try_tail """ return C.tail(x) + @non_graph_engine def test_tail(): """ test_tail """ try_tail((0, 1, 2, 3)) + @ms_function def zero_like_tensor(x): """ zero_like_tensor """ return C.zeros_like(x) + def test_zeros(): """ test_zeros """ x = Tensor(np.ones([2, 3]).astype(np.int32)) res = zero_like_tensor(x) assert res == Tensor(np.zeros([2, 3]).astype(np.int32)) + def test_ScalarGradChecker(): """ test_ScalarGradChecker """ + def scalar_f(x, y): return x * y + check_gradient(scalar_f, 1.0, 4.0, grad_checker_class=ScalarGradChecker, sampling_times=1) + def test_GradCheckerPrimitive(): """ test_GradCheckerPrimitive """ matmul = P.MatMul() + def prim_f(x, y): return matmul(x, y) + check_gradient(prim_f, Tensor(np.array([[0.65, 0.8, 0.8]], np.float32)), Tensor(np.array([[0.1], [0.2], [-.1]], np.float32)), grad_checker_class=OperationGradChecker, sampling_times=2) + def test_NNGradChecker(): """ test_NNGradChecker """ + class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.dense = nn.Dense(10, 10) + def construct(self, x): out = self.dense(x) return out @@ -504,18 +586,23 @@ def test_NNGradChecker(): max_error=1e-3, grad_checker_class=NNGradChecker, sampling_times=3) + def test_OperationGradChecker(): """ test_OperationGradChecker """ + class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.matmul = P.MatMul() self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z') + def construct(self, x, y): x = x * self.z out = self.matmul(x, y) return out + check_gradient(Net(), Tensor(np.array([[0.65, 0.8, 0.8]], np.float32)), Tensor(np.array([[0.1], [0.2], [-.1]], np.float32)), grad_checker_class=OperationGradChecker, input_selector=[1], sampling_times=2) @@ -523,23 +610,29 @@ def test_OperationGradChecker(): def test_ScalarJacobianChecker(): """ test_ScalarJacobianChecker """ + def scalar_f(x, y): return x * y + check_jacobian(scalar_f, 1.0, 4.0, grad_checker_class=ScalarGradChecker, input_selector=[0]) def test_OperationJacobianChecker(): """ test_OperationJacobianChecker """ + class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.matmul = P.MatMul() self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z') + def construct(self, x, y): x = x * self.z out = self.matmul(x, y) return x, out + check_jacobian(Net(), Tensor(np.array([[0.65, 0.8, 0.8], [0.1, 0.2, 0.3]], np.float32)), Tensor(np.array([[0.1, 0.3], [0.2, 0.2], [-.1, 0.4]], np.float32)), grad_checker_class=OperationGradChecker, input_selector=[0], @@ -548,11 +641,14 @@ def test_OperationJacobianChecker(): def test_NNJacobianChecker(): """ test_NNJacobianChecker """ + class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.dense = nn.Dense(10, 10) + def construct(self, x): out = self.dense(x) return out, x @@ -564,13 +660,16 @@ def test_NNJacobianChecker(): input_selector=[1], output_selector=[0]) + def multi_outputs(x, y): z = x + y return 2 * z, 2 * z + def test_grad_multi_outputs(): assert C.grad_all_with_sens(multi_outputs)(2, 3, (1, 1)) == (4, 4) + @ms_function def while_sp(x, y, z): out = x @@ -580,6 +679,7 @@ def 
while_sp(x, y, z): i = i + c3 return out + def test_while_sp(): y = Tensor(np.ones([1, 3]).astype(np.float32)) z = Tensor(np.ones([1, 3]).astype(np.float32)) @@ -587,6 +687,7 @@ def test_while_sp(): res = while_sp(x, y, z) assert res == Tensor(np.ones([1, 3]).astype(np.float32) * 1024.0) + def grad_refactor_simple_1(x, y): """ add """ return x * x + 2 * y @@ -607,8 +708,10 @@ def test_grad_refactor_simple_2(): def grad_refactor_1(a, b): """ if_test """ + def inner(x, y): return x * y + return inner(a, b) @@ -618,8 +721,10 @@ def test_grad_refactor_1(): def grad_refactor_2(a, b): """ if_test """ + def inner(x): return x * b + return inner(b) * inner(a) @@ -701,10 +806,13 @@ def test_grad_refactor_10(): def test_grad_refactor_11(): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() + def construct(self, x, y): return x * y * y + net = Net() C.grad_all(net)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.ones([2]).astype(np.float32))) @@ -712,11 +820,14 @@ def test_grad_refactor_11(): def test_grad_refactor_12(): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z') + def construct(self, x, y): return x * self.z * y + net = Net() C.grad_all(net)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.zeros([2]).astype(np.float32))) @@ -724,11 +835,14 @@ def test_grad_refactor_12(): def test_grad_refactor_13(): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.z = Parameter(Tensor(np.ones([2]).astype(np.float32)), name='z') + def construct(self, x, y): return x * self.z * y + net = Net() weights = ParameterTuple(net.trainable_params()) C.grad_by_list(net, weights)(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.zeros([2]).astype(np.float32))) @@ -736,15 +850,21 @@ def test_grad_refactor_13(): def grad_refactor_14(a, b): """ if_test """ + def inner1(x): return x * b + def inner2(x): return a * b + def inner3(x): if (x > 2): return a return b + return inner1(b) + inner2(a) + inner3(a) + + def test_grad_refactor_14(): assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9) @@ -761,6 +881,7 @@ class IfDeferInline(nn.Cell): x = x return x + def test_grad_if_defer_inline(): """ test_grad_if_defer_inline """ network = IfDeferInline([128, 96]) @@ -769,6 +890,7 @@ def test_grad_if_defer_inline(): grads = C.grad_all(network)(inp) assert grads == (Tensor(np.full([128, 96], 0.6, dtype=np.float32)),) + def test_bprop_with_wrong_output_num(): class BpropWithWrongOutputNum(PrimitiveWithInfer): @prim_attr_register @@ -787,18 +909,23 @@ def test_bprop_with_wrong_output_num(): @bprop_getters.register(BpropWithWrongOutputNum) def get_bprop_with_wrong_output_num(self): """Generate bprop for BpropWithWrongOutputNum""" + def bprop(x, y, out, dout): return (dout,) + return bprop class BpropWithWrongOutputNumCell(nn.Cell): def __init__(self): super(BpropWithWrongOutputNumCell, self).__init__() + def construct(self, x, y): return BpropWithWrongOutputNum()(x, y) + with pytest.raises(TypeError): C.grad_all(BpropWithWrongOutputNumCell())(1, 2) + def test_bprop_with_wrong_output_type(): class BpropWithWrongOutputType(PrimitiveWithInfer): @prim_attr_register @@ -817,18 +944,23 @@ def test_bprop_with_wrong_output_type(): @bprop_getters.register(BpropWithWrongOutputType) def get_bprop_with_wrong_output_type(self): """Generate bprop for BpropWithWrongOutputType""" + def bprop(x, out, dout): return (1,) + return bprop class 
@@ -761,6 +881,7 @@ class IfDeferInline(nn.Cell):
         x = x
         return x
 
+
 def test_grad_if_defer_inline():
     """ test_grad_if_defer_inline """
     network = IfDeferInline([128, 96])
@@ -769,6 +890,7 @@ def test_grad_if_defer_inline():
     grads = C.grad_all(network)(inp)
     assert grads == (Tensor(np.full([128, 96], 0.6, dtype=np.float32)),)
 
+
 def test_bprop_with_wrong_output_num():
     class BpropWithWrongOutputNum(PrimitiveWithInfer):
         @prim_attr_register
@@ -787,18 +909,23 @@ def test_bprop_with_wrong_output_num():
     @bprop_getters.register(BpropWithWrongOutputNum)
     def get_bprop_with_wrong_output_num(self):
         """Generate bprop for BpropWithWrongOutputNum"""
+
         def bprop(x, y, out, dout):
             return (dout,)
+
         return bprop

     class BpropWithWrongOutputNumCell(nn.Cell):
         def __init__(self):
             super(BpropWithWrongOutputNumCell, self).__init__()
+
         def construct(self, x, y):
             return BpropWithWrongOutputNum()(x, y)
+
     with pytest.raises(TypeError):
         C.grad_all(BpropWithWrongOutputNumCell())(1, 2)
 
+
 def test_bprop_with_wrong_output_type():
     class BpropWithWrongOutputType(PrimitiveWithInfer):
         @prim_attr_register
@@ -817,18 +944,23 @@ def test_bprop_with_wrong_output_type():
     @bprop_getters.register(BpropWithWrongOutputType)
     def get_bprop_with_wrong_output_type(self):
         """Generate bprop for BpropWithWrongOutputType"""
+
         def bprop(x, out, dout):
             return (1,)
+
         return bprop

     class BpropWithWrongOutputTypeCell(nn.Cell):
         def __init__(self):
             super(BpropWithWrongOutputTypeCell, self).__init__()
+
         def construct(self, x):
             return BpropWithWrongOutputType()(x)
+
     with pytest.raises(TypeError):
         C.grad_all(BpropWithWrongOutputTypeCell())(Tensor(np.ones([64, 10]).astype(np.int32)))
 
+
 def test_bprop_with_wrong_output_shape():
     class BpropWithWrongOutputShape(PrimitiveWithInfer):
         @prim_attr_register
@@ -847,15 +979,19 @@ def test_bprop_with_wrong_output_shape():
     @bprop_getters.register(BpropWithWrongOutputShape)
     def get_bprop_with_wrong_output_shape(self):
         """Generate bprop for BpropWithWrongOutputShape"""
-        ones = Tensor(np.ones([2,]).astype(np.int32))
+        ones = Tensor(np.ones([2, ]).astype(np.int32))
+
         def bprop(x, out, dout):
             return (ones,)
+
         return bprop

     class BpropWithWrongOutputShapeCell(nn.Cell):
         def __init__(self):
             super(BpropWithWrongOutputShapeCell, self).__init__()
+
         def construct(self, x):
             return BpropWithWrongOutputShape()(x)
+
     with pytest.raises(TypeError):
         C.grad_all(BpropWithWrongOutputShapeCell())(Tensor(np.ones([64, 10]).astype(np.int32)))
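For contrast with the three failure cases above, a well-formed bprop returns exactly one gradient per forward input, each matching that input's shape and type. A minimal sketch in the same registration style (PassThroughPrim is a hypothetical two-input, two-output primitive, not part of this change):

    @bprop_getters.register(PassThroughPrim)  # PassThroughPrim is hypothetical
    def get_bprop_pass_through(self):
        """Return one gradient per input, shaped like the inputs."""

        def bprop(x, y, out, dout):
            # out is the pair (x, y), so dout arrives as a pair; forward each piece.
            return (dout[0], dout[1])

        return bprop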
diff --git a/tests/ut/python/pynative_mode/test_high_order_grad.py b/tests/ut/python/pynative_mode/test_high_order_grad.py
index 5548583cc5..3c26d0fa8b 100644
--- a/tests/ut/python/pynative_mode/test_high_order_grad.py
+++ b/tests/ut/python/pynative_mode/test_high_order_grad.py
@@ -93,7 +93,7 @@ def if_test(x):
     """ if_test """
     if x > 10:
         return x * x
-    return x * x * x
+    return x * x * x


 def first_derivative_if(x):
@@ -116,11 +116,13 @@ def test_high_order_grad_1():
     # 18 * x
     assert second_derivative_all(3) == 54
 
+
 def test_high_order_grad_2():
     """ test_high_order_grad_2 """
     # 2
     assert second_derivative_if(12) == 2
 
+
 def test_high_order_grad_3():
     """ test_high_order_grad_2 """
     # 6 * x
diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py
index d9368f315b..48033eda0a 100644
--- a/tests/ut/python/pynative_mode/test_insert_grad_of.py
+++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py
@@ -26,6 +27,7 @@ from mindspore import Tensor
 from mindspore import context
 import mindspore
 
+
 def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE)
 
@@ -34,9 +35,13 @@ def stop_gradient(dx):
     """ stop_gradient """
     return C.zeros_like(dx)
 
+
 stop = P.InsertGradientOf(stop_gradient)
+
+
 def test_InsertGradientOf_1():
     """ test_InsertGradientOf_1 """
+
     def stop_test(x, y):
         x = stop(x)
         c = x * y
@@ -44,8 +49,10 @@ def test_InsertGradientOf_1():

     def f(x, y):
         return C.grad_all(stop_test)(x, y)
+
     print("stop_gradient:", f(1, 2))
 
+
 def clip_gradient(dx):
     """ clip_gradient """
     ret = dx
@@ -57,9 +64,13 @@
     return ret
 
+
 clip = P.InsertGradientOf(clip_gradient)
+
+
 def test_InsertGradientOf_2():
     """ test_InsertGradientOf_2 """
+
     def clip_test(x, y):
         x = clip(x)
         y = clip(y)
@@ -76,15 +87,22 @@ def test_InsertGradientOf_2():
     print("forward: ", f(1.1, 0.1))
     print("clip_gradient:", fd(1.1, 0.1))
 
+
 summary = P.ScalarSummary()
+
+
 def debug_gradient(dx):
     """ debug_gradient """
     summary("dx: ", dx)
     return dx
 
+
 debug = P.InsertGradientOf(debug_gradient)
+
+
 def test_InsertGradientOf_3():
     """ test_InsertGradientOf_3 """
+
     def debug_test(x, y):
         x = debug(x)
         y = debug(y)
@@ -93,26 +111,33 @@ def test_InsertGradientOf_3():

     def f(x, y):
         return C.grad_all(debug_test)(x, y)
+
     print("debug_gradient:", f(1, 2))
 
+
 def test_print_shape_type():
     class Mul(nn.Cell):
         def __init__(self):
             super(Mul, self).__init__()
             self.print_shape_type = PrintShapeTypeCell()
             self.print_shape_type_gradient = PrintGradShapeTypeCell("Gradients")
+
         def construct(self, x, y):
             z = x * y
             self.print_shape_type("Forward", z)
             self.print_shape_type_gradient(z)
             return z
+
     bprop(Mul(), Tensor(np.ones([2, 2]).astype(np.float32)),
           Tensor(np.ones([2, 2]).astype(np.float32)))
 
+
 def test_cell_assign():
     context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+
     class GradNetWrap(nn.Cell):
         """ GradNetWrap definition """
+
         def __init__(self, net):
             super(GradNetWrap, self).__init__()
             self.net = net
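Context for the hunks above: P.InsertGradientOf(fn) leaves the forward value unchanged and runs fn on the incoming gradient during backpropagation, which is how the stop/clip/debug hooks in this file work. What a clipping hook does to a gradient, as a plain-NumPy sketch (the [0.2, 1.0] bounds are assumed for illustration; the real ones sit in lines this diff does not show):

    import numpy as np

    def clip_hook(dx, lo=0.2, hi=1.0):
        # clamp the incoming gradient into [lo, hi]; illustrative bounds
        return np.clip(dx, lo, hi)

    assert clip_hook(3.0) == 1.0   # large gradient clipped down
    assert clip_hook(0.05) == 0.2  # small gradient clipped up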
diff --git a/tests/ut/python/pynative_mode/test_multigraph_sink.py b/tests/ut/python/pynative_mode/test_multigraph_sink.py
index ad2a778fbc..24530ee226 100644
--- a/tests/ut/python/pynative_mode/test_multigraph_sink.py
+++ b/tests/ut/python/pynative_mode/test_multigraph_sink.py
@@ -20,7 +20,7 @@ from mindspore.common import ms_function

 def setup_module(module):
-    context.set_context(mode = context.PYNATIVE_MODE, save_graphs = False, device_target = "Ascend")
+    context.set_context(mode=context.PYNATIVE_MODE, save_graphs=False, device_target="Ascend")
     context.set_context(device_id=0)
diff --git a/tests/ut/python/pynative_mode/test_parse_method.py b/tests/ut/python/pynative_mode/test_parse_method.py
index cb984056de..a4e6216c42 100644
--- a/tests/ut/python/pynative_mode/test_parse_method.py
+++ b/tests/ut/python/pynative_mode/test_parse_method.py
@@ -90,6 +90,7 @@ def test_var_parameter_case1():

 class Net(nn.Cell):
     """ Net definition """
+
     def __init__(self, value1):
         super(Net, self).__init__()
         self.relu = nn.ReLU()
@@ -110,6 +111,7 @@ class Net(nn.Cell):

 class ClassTest:
     """ ClassTest definition """
+
     def __init__(self, name, value1):
         self.name = name
         self.value = value1
@@ -147,6 +149,7 @@ def test_call_method_on_construct():
     # Test: call method on parse graph code
     class Net1(nn.Cell):
         """ Net1 definition """
+
         def __init__(self, v1, v2):
             super(Net1, self).__init__()
             self.relu = nn.ReLU()
@@ -188,6 +191,7 @@ TC = ClassTest("test_class", value)

 class Net2(nn.Cell):
     """ Net2 definition """
+
     def __init__(self, value1):
         super(Net2, self).__init__()
         self.value = value1
@@ -237,9 +241,11 @@ def vararg1(x, y):

 def varargs_main(fn):
     """ varargs_main """
+
     @ms_function
     def t1(*args):
         return fn(*args)
+
     return t1
@@ -283,6 +289,7 @@ class Access:
             return self.a
         return self.b
 
+
 @ms_function
 def invoke_dataclass(x, y):
     """ invoke_dataclass """
diff --git a/tests/ut/python/pynative_mode/test_pynative_model.py b/tests/ut/python/pynative_mode/test_pynative_model.py
index cdf33a556b..9b5cb9babc 100644
--- a/tests/ut/python/pynative_mode/test_pynative_model.py
+++ b/tests/ut/python/pynative_mode/test_pynative_model.py
@@ -30,6 +30,7 @@ def setup_module(module):

 class GradWrap(nn.Cell):
     """ GradWrap definition """
+
     def __init__(self, network):
         super(GradWrap, self).__init__()
         self.network = network
@@ -43,8 +44,10 @@ class GradWrap(nn.Cell):
 @non_graph_engine
 def test_softmaxloss_grad():
     """ test_softmaxloss_grad """
+
     class NetWithLossClass(nn.Cell):
         """ NetWithLossClass definition """
+
         def __init__(self, network):
             super(NetWithLossClass, self).__init__()
             self.loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -56,6 +59,7 @@ def test_softmaxloss_grad():

     class Net(nn.Cell):
         """ Net definition """
+
         def __init__(self):
             super(Net, self).__init__()
             self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")
@@ -80,8 +84,10 @@ def test_softmaxloss_grad():
 @non_graph_engine
 def test_lenet_grad():
     """ test_lenet_grad """
+
     class NetWithLossClass(nn.Cell):
         """ NetWithLossClass definition """
+
         def __init__(self, network):
             super(NetWithLossClass, self).__init__()
             self.loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -93,6 +99,7 @@ def test_lenet_grad():

     class LeNet5(nn.Cell):
         """ LeNet5 definition """
+
         def __init__(self):
             super(LeNet5, self).__init__()
             self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
diff --git a/tests/ut/python/pynative_mode/test_remove_unnecessary_phi.py b/tests/ut/python/pynative_mode/test_remove_unnecessary_phi.py
index ddd729b250..cf30a7a61a 100644
--- a/tests/ut/python/pynative_mode/test_remove_unnecessary_phi.py
+++ b/tests/ut/python/pynative_mode/test_remove_unnecessary_phi.py
@@ -31,6 +31,7 @@ def setup_module(module):
 # as POLY.
 def test_remove_phi_and_fv():
     """ test_remove_phi_and_fv """
+
     @ms_function
     @core(loop_can_unroll=True)
     def loop(x, input_data):
@@ -47,6 +48,7 @@ def test_remove_phi_and_fv():
     input1 = Tensor(normal(0, 0.1, (3, 3)))
     print(loop(input1, input_data))
 
+
 # Multiple phi nodes should be replaced.
 # mul Φ0 (mul, Φ0); Φ0 will be replaced by mul;
 # x Φ1 (x, Φ1); Φ1 will be replaced by x;
@@ -55,6 +57,7 @@
 # Φ0 and Φ1 in Φ2 node should be replaced with mul and x.
 def test_remove_multiple_phi():
     """ test_remove_multiple_phi """
+
     @ms_function
     @core(loop_can_unroll=True)
     def loop(x):
@@ -77,6 +80,7 @@
 # recursively, Φ5 also should be replaced by x.
 def test_remove_multiple_phi_recursive():
     """ test_remove_multiple_phi_recursive """
+
     @ms_function
     @core(loop_can_unroll=True)
     def loop(x):
diff --git a/tests/ut/python/pynative_mode/test_staging.py b/tests/ut/python/pynative_mode/test_staging.py
index fc120e7c00..f7a9bb4151 100644
--- a/tests/ut/python/pynative_mode/test_staging.py
+++ b/tests/ut/python/pynative_mode/test_staging.py
@@ -38,6 +38,7 @@ def tensor_add_func_inner(x, y):
     z = F.tensor_add(x, y)
     return z
 
+
 @ms_function
 def tensor_add_func(x, y):
     """ tensor_add_func """
@@ -79,6 +80,7 @@ def tensor_add_test(x, y):

 class TensorAddMulNet(nn.Cell):
     """ TensorAddMulNet definition """
+
     def __init__(self):
         super(TensorAddMulNet, self).__init__()
         self.add = P.TensorAdd()
@@ -105,6 +107,7 @@ class TensorAddMulNet(nn.Cell):

 class TensorAddNet(nn.Cell):
     """ TensorAddNet definition """
+
     def __init__(self):
         super(TensorAddNet, self).__init__()
         self.add = P.TensorAdd()
@@ -177,6 +180,7 @@ def test_input_signature():
     with pytest.raises(ValueError):
         tensor_add_test(x3, y3)
 
+
 def test_scalar_cast():
     """ test_scalar_cast """
     input_x = 8.5
diff --git a/tests/ut/python/pynative_mode/test_stop_gradient.py b/tests/ut/python/pynative_mode/test_stop_gradient.py
index fc447aa558..94a5cdac98 100644
--- a/tests/ut/python/pynative_mode/test_stop_gradient.py
+++ b/tests/ut/python/pynative_mode/test_stop_gradient.py
@@ -37,38 +37,43 @@ def setup_module(module):

 def stop_func(x, y):
     """ stop_func"""
-    c = x*y
+    c = x * y
     c_s = x + y
     return c_s, c
 
+
 def stop_test1(x, y):
     """ stop_test1 """
-    c = x*y
+    c = x * y
     c_s = stop_gradient(c)
     return c_s
 
+
 def stop_test2(x, y):
     """ stop_test2 """
-    c = x*y
+    c = x * y
     c_s = stop_gradient(c)
-    d = c_s+x*y
+    d = c_s + x * y
     return d * y
 
+
 def stop_test3(x, y):
     """ stop_test3 """
-    x = x*y
+    x = x * y
     z = stop_test1(x, y)
     k = z * y
     return k
 
+
 def stop_test5(x, y):
     """ stop_test3 """
-    x = x+y
-    o1, o2= stop_func(x, y)
+    x = x + y
+    o1, o2 = stop_func(x, y)
     c = stop_gradient(o1)
-    c = o2+c
+    c = o2 + c
     return c
 
+
 def stop_test4(x, y):
     """ stop_test4 """
     c = x + y
@@ -76,28 +81,35 @@ def stop_test4(x, y):
     e = c + c_s
     return e
 
+
 def grad_stop_test(x, y):
""" grad_stop_test """ return C.grad_all(stop_test2)(x, y) + def grad_stop_test1(x, y): """ grad_stop_test1 """ return C.grad_all(stop_test3)(x, y) + def test_stop(): """ test_stop """ print("test_stop:", grad_stop_test(1, 1)) + def test_stop1(): """ test_stop1 """ print("test_stop1:", grad_stop_test1(2, 3)) + def test_stop5(): """ test_stop1 """ print("test_stop5:", C.grad_all(stop_test5)(2, 3)) + class GradWrap(nn.Cell): """ GradWrap definition """ + def __init__(self, network): super(GradWrap, self).__init__() self.network = network @@ -112,8 +124,10 @@ class GradWrap(nn.Cell): @non_graph_engine def test_softmaxloss_grad(): """ test_softmaxloss_grad """ + class NetWithLossClass(nn.Cell): """ NetWithLossClass definition """ + def __init__(self, network): super(NetWithLossClass, self).__init__() self.loss = nn.SoftmaxCrossEntropyWithLogits() @@ -126,6 +140,7 @@ def test_softmaxloss_grad(): class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight") @@ -154,6 +169,7 @@ def test_softmaxloss_grad(): out = net(predict, label) print("out:", out) + def test_stop_gradient_1(): class Mul(nn.Cell): def __init__(self): @@ -164,12 +180,14 @@ def test_stop_gradient_1(): ret = x * y ret = stop_gradient(ret) return ret + dx, dy = bprop(Mul(), Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32)), wrt=['inputs']) expect = np.zeros([2, 2]) assert (dx.asnumpy() == expect).all() assert (dy.asnumpy() == expect).all() + def test_stop_gradient_2(): class Mul(nn.Cell): def __init__(self): @@ -180,6 +198,7 @@ def test_stop_gradient_2(): c = x * y z = x * y return c, z + class MulAdd(nn.Cell): def __init__(self): super(MulAdd, self).__init__() @@ -194,11 +213,13 @@ def test_stop_gradient_2(): ret1 = c + x + y ret2 = z + y + y return ret1, ret2 + dx = bprop(MulAdd(), Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))) expect = np.array([[3.0, 3.0], [3.0, 3.0]]) assert (dx.asnumpy() == expect).all() + def test_stop_gradient_3(): class TupleGetItem(nn.Cell): def __init__(self): @@ -212,51 +233,65 @@ def test_stop_gradient_3(): z2 = t[1] z2 = stop_gradient(z2) return z1, z2, x3, x4, x5 + dx = bprop(TupleGetItem(), - Tensor(np.ones([2]).astype(np.float32)), - Tensor(np.ones([2]).astype(np.float32)), - Tensor(np.ones([2]).astype(np.float32)), - Tensor(np.ones([2]).astype(np.float32)), - Tensor(np.ones([2]).astype(np.float32))) + Tensor(np.ones([2]).astype(np.float32)), + Tensor(np.ones([2]).astype(np.float32)), + Tensor(np.ones([2]).astype(np.float32)), + Tensor(np.ones([2]).astype(np.float32)), + Tensor(np.ones([2]).astype(np.float32))) expect = np.array([[2.0, 2.0], [2.0, 2.0]]) assert (dx.asnumpy() == expect).all() + def test_stop_gradient_4(): def stop_test(x): return stop_gradient(x) + assert C.grad_all(stop_test)(1) == (0,) + def test_stop_gradient_5(): def stop_test(x): y = x + x y = stop_gradient(y) ret = x + y return ret + assert C.grad_all(stop_test)(1) == (1,) + def test_stop_gradient_6(): def stop_test(x, y): ret = x * y ret = stop_gradient(ret) return ret + assert C.grad_all(stop_test)(1, 3) == (0, 0) + class PrimWithMultiOutputs(PrimitiveWithInfer): @prim_attr_register def __init__(self): """init""" + def __call__(self, x, y): """Implement by vm mode.""" return x, y + def infer_shape(self, x_shape, y_shape): return x_shape, y_shape + def infer_dtype(self, x_type, y_type): return x_type, y_type + def get_bprop(self): def bprop(x, y, out, 
 
+
 class PrimWithMultiOutputs(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """init"""
+
     def __call__(self, x, y):
         """Implement by vm mode."""
         return x, y
+
     def infer_shape(self, x_shape, y_shape):
         return x_shape, y_shape
+
     def infer_dtype(self, x_type, y_type):
         return x_type, y_type
+
     def get_bprop(self):
         def bprop(x, y, out, dout):
             return (dout[0], dout[1])
+
         return bprop
 
+
 def test_stop_gradient_7():
     class PrimWithMultiOutputs_(nn.Cell):
         def __init__(self):
@@ -268,6 +303,7 @@ def test_stop_gradient_7():
             x1, x2 = self.prim_with_multi_outputs(x1, x2)
             x1 = stop_gradient(x1)
             return x1, x2
+
     dx, dy = bprop(PrimWithMultiOutputs_(), Tensor(np.ones([2]).astype(np.float32)),
                    Tensor(np.ones([2]).astype(np.float32)), wrt=['inputs'])
     expect_dx = np.zeros([2])
@@ -275,6 +311,7 @@ def test_stop_gradient_7():
     assert (dx.asnumpy() == expect_dx).all()
     assert (dy.asnumpy() == expect_dy).all()
 
+
 def test_stop_gradient_8():
     class PrimWithMultiOutputs_(nn.Cell):
         def __init__(self):
@@ -285,6 +322,7 @@ def test_stop_gradient_8():
         def construct(self, x1, x2):
             x1, x2 = stop_gradient(self.prim_with_multi_output(x1, x2))
             return x1, x2
+
     dx, dy = bprop(PrimWithMultiOutputs_(), Tensor(np.ones([2]).astype(np.float32)),
                    Tensor(np.ones([2]).astype(np.float32)), wrt=['inputs'])
     expect_dx = np.zeros([2])
@@ -292,6 +330,7 @@ def test_stop_gradient_8():
     assert (dx.asnumpy() == expect_dx).all()
     assert (dy.asnumpy() == expect_dy).all()
 
+
 def test_stop_gradient_9():
     class Mul(nn.Cell):
         def __init__(self):
@@ -302,6 +341,7 @@ def test_stop_gradient_9():
             c = x * y
             z = x * y
             return c, z
+
     class MulAdd(nn.Cell):
         def __init__(self):
             super(MulAdd, self).__init__()
@@ -317,23 +357,29 @@ def test_stop_gradient_9():
             ret1 = c1 + x + y + c2
             ret2 = z + y + y
             return ret1, ret2
+
     dx = bprop(MulAdd(), Tensor(np.ones([2, 2]).astype(np.float32)),
-        Tensor(np.ones([2, 2]).astype(np.float32)))
+               Tensor(np.ones([2, 2]).astype(np.float32)))
     expect = np.array([[5.0, 5.0], [5.0, 5.0]])
     assert (dx.asnumpy() == expect).all()
 
+
 class PrimWithNoBprop(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """init"""
+
     def __call__(self, x, y):
         """Implement by vm mode."""
         return x, y
+
     def infer_shape(self, x_shape, y_shape):
         return x_shape, y_shape
+
     def infer_dtype(self, x_type, y_type):
         return x_type, y_type
 
+
 def test_stop_gradient_10():
     class PrimWithNoBprop_(nn.Cell):
         def __init__(self):
@@ -347,11 +393,13 @@ def test_stop_gradient_10():
             x = stop_gradient(x)
             y = stop_gradient(y)
             return x, y
+
     dx = bprop(PrimWithNoBprop_(), Tensor(np.ones([2]).astype(np.float32)),
                Tensor(np.ones([2]).astype(np.float32)))
     expect_dx = np.zeros([2])
     assert (dx.asnumpy() == expect_dx).all()
 
+
 def test_stop_gradient_11():
     class PrimWithNoBprop_(nn.Cell):
         def __init__(self):
@@ -363,18 +411,22 @@ def test_stop_gradient_11():
             x, y = self.prim_with_no_bprop(x, y)
             x = stop_gradient(x)
             return x, y
+
     with pytest.raises(RuntimeError):
         bprop(PrimWithNoBprop_(), Tensor(np.ones([2]).astype(np.float32)),
               Tensor(np.ones([2]).astype(np.float32)))
 
+
 def test_stop_print():
     class StopPrint(nn.Cell):
         def __init__(self):
             super(StopPrint, self).__init__()
             self.printm = P.Print()
+
         def construct(self, x, y):
             self.printm("StopPrint", x)
             self.printm(y)
             return x, y
+
     C.grad_all(StopPrint())(Tensor(np.ones([2]).astype(np.float32)), Tensor(np.ones([2]).astype(np.float32)))
diff --git a/tests/ut/python/pynative_mode/test_training.py b/tests/ut/python/pynative_mode/test_training.py
index c8af5d839c..5b0aa936e0 100644
--- a/tests/ut/python/pynative_mode/test_training.py
+++ b/tests/ut/python/pynative_mode/test_training.py
@@ -31,6 +31,7 @@ def setup_module(module):

 class LeNet5(nn.Cell):
     """ LeNet5 definition """
+
     def __init__(self):
         super(LeNet5, self).__init__()
         self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
diff --git a/tests/ut/python/pynative_mode/vm/test_vm.py b/tests/ut/python/pynative_mode/vm/test_vm.py
index 77510337b0..10f7435a8c 100644
--- a/tests/ut/python/pynative_mode/vm/test_vm.py
+++ b/tests/ut/python/pynative_mode/vm/test_vm.py
@@ -245,7 +245,7 @@ def test_relu():

 def test_softmax():
     """ test_softmax """
-    logits = 2.84806275*np.ones([1, 10]).astype(np.float32)
+    logits = 2.84806275 * np.ones([1, 10]).astype(np.float32)
     y = vm.softmax(logits)
     assert np.allclose([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], y)
     assert np.float32 == y.dtype
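The 0.1 expectation above is forced by symmetry: all ten logits are equal, and softmax of a constant vector is uniform, exp(c) / (10 * exp(c)) = 0.1 per entry, regardless of the constant 2.84806275. A standalone NumPy check (illustrative; not the vm module under test):

    import numpy as np

    logits = 2.84806275 * np.ones([1, 10]).astype(np.float32)
    y = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    assert np.allclose(y, 0.1)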
diff --git a/tests/ut/python/train/summary/test_histogram_summary.py b/tests/ut/python/train/summary/test_histogram_summary.py
index caaf8c2b83..94298af5c5 100644
--- a/tests/ut/python/train/summary/test_histogram_summary.py
+++ b/tests/ut/python/train/summary/test_histogram_summary.py
@@ -53,7 +53,6 @@ def test_histogram_summary():
     """Test histogram summary."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             test_data = _wrap_test_data(Tensor([[1, 2, 3], [4, 5, 6]]))
             _cache_summary_tensor_data(test_data)
             test_writer.record(step=1)
@@ -91,7 +90,6 @@ def test_histogram_summary_scalar_tensor():
     """Test histogram summary, input is a scalar tensor."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             test_data = _wrap_test_data(Tensor(1))
             _cache_summary_tensor_data(test_data)
             test_writer.record(step=1)
@@ -106,7 +104,6 @@ def test_histogram_summary_empty_tensor():
     """Test histogram summary, input is an empty tensor."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             test_data = _wrap_test_data(Tensor([]))
             _cache_summary_tensor_data(test_data)
             test_writer.record(step=1)
@@ -121,7 +118,6 @@ def test_histogram_summary_same_value():
     """Test histogram summary, input is an ones tensor."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             dim1 = 100
             dim2 = 100
@@ -161,7 +157,6 @@ def test_histogram_summary_nan_inf():
     """Test histogram summary, input tensor has nan."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             dim1 = 100
             dim2 = 100
@@ -186,7 +181,6 @@ def test_histogram_summary_all_nan_inf():
     """Test histogram summary, input tensor has no valid number."""
     with tempfile.TemporaryDirectory() as tmp_dir:
         with SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") as test_writer:
-
             test_data = _wrap_test_data(Tensor(np.array([np.nan, np.nan, np.nan, np.inf, -np.inf])))
             _cache_summary_tensor_data(test_data)
             test_writer.record(step=1)
diff --git a/tests/ut/python/train/summary/test_image_summary.py b/tests/ut/python/train/summary/test_image_summary.py
index df81e5251e..ee598d25a3 100644
--- a/tests/ut/python/train/summary/test_image_summary.py
+++ b/tests/ut/python/train/summary/test_image_summary.py
@@ -75,7 +75,6 @@ def test_image_summary_sample():
     log.debug("begin test_image_summary_sample")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
-
         # step 1: create the test data for summary

         # step 2: create the Event
@@ -173,7 +172,6 @@ def test_image_summary_train():
     log.debug("begin test_image_summary_sample")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
-
         # step 1: create the test data for summary

         # step 2: create the Event
@@ -206,7 +204,6 @@
 def test_image_summary_data():
     log.debug("begin test_image_summary_sample")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
-
         # step 1: create the test data for summary

         # step 2: create the Event
diff --git a/tests/ut/python/train/summary/test_summary.py b/tests/ut/python/train/summary/test_summary.py
index 5ec6e0157d..1b5e75ee26 100644
--- a/tests/ut/python/train/summary/test_summary.py
+++ b/tests/ut/python/train/summary/test_summary.py
@@ -66,7 +66,6 @@ def test_scalar_summary_sample():
     log.debug("begin test_scalar_summary_sample")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_SCALAR") as test_writer:
-
         # step 1: create the test data for summary

         # step 2: create the Event
@@ -110,7 +109,6 @@ def test_scalar_summary_sample_with_shape_1():
     log.debug("begin test_scalar_summary_sample_with_shape_1")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_SCALAR") as test_writer:
-
         # step 1: create the test data for summary

         # step 2: create the Event
@@ -151,7 +149,6 @@ def test_scalar_summary_with_ge():

     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_SCALAR") as test_writer:
-
         # step 1: create the network for summary
         x = Tensor(np.array([1.1]).astype(np.float32))
         y = Tensor(np.array([1.2]).astype(np.float32))
@@ -166,7 +163,6 @@ def test_scalar_summary_with_ge():
             net(x, y)
             test_writer.record(i)

-
     log.debug("finished test_scalar_summary_with_ge")
@@ -177,7 +173,6 @@ def test_scalar_summary_with_ge_2():

     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_SCALAR") as test_writer:
-
         # step 1: create the network for summary
         x = Tensor(np.array([1.1]).astype(np.float32))
         y = Tensor(np.array([1.2]).astype(np.float32))
@@ -192,13 +187,11 @@ def test_scalar_summary_with_ge_2():
             net(x, y)
             test_writer.record(i)

-
     log.debug("finished test_scalar_summary_with_ge_2")


 def test_validate():
     with SummaryRecord(SUMMARY_DIR) as sr:
-
         with pytest.raises(ValueError):
             SummaryStep(sr, 0)
         with pytest.raises(ValueError):
diff --git a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
index a434075f64..1374330c22 100644
--- a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
+++ b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
@@ -127,7 +127,6 @@ def run_case(net):
     """ run_case """
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR) as test_writer:
-
         # step 1: create the network for summary
         x = Tensor(np.array([1.1]).astype(np.float32))
         y = Tensor(np.array([1.2]).astype(np.float32))
@@ -142,7 +141,6 @@ def run_case(net):

         test_writer.record(i)

-
 # Test 1: use the repeat tag
 def test_summary_use_repeat_tag():
     log.debug("begin test_summary_use_repeat_tag")
diff --git a/tests/ut/python/train/summary/test_tensor_summary.py b/tests/ut/python/train/summary/test_tensor_summary.py
index c41480f21f..4950241055 100644
--- a/tests/ut/python/train/summary/test_tensor_summary.py
+++ b/tests/ut/python/train/summary/test_tensor_summary.py
@@ -81,7 +81,6 @@ def test_tensor_summary_sample():
     log.debug("begin test_tensor_summary_sample")
     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_TENSOR") as test_writer:
-
         # step 1: create the Event
         for i in range(1, 100):
             test_data = get_test_data(i)
@@ -131,7 +130,6 @@ def test_tensor_summary_with_ge():

     # step 0: create the thread
     with SummaryRecord(SUMMARY_DIR) as test_writer:
-
         # step 1: create the network for summary
         x = Tensor(np.array([1.1]).astype(np.float32))
         y = Tensor(np.array([1.2]).astype(np.float32))
diff --git a/tests/ut/python/ut_filter.py b/tests/ut/python/ut_filter.py
index 683cd80a77..858dbf330e 100644
--- a/tests/ut/python/ut_filter.py
+++ b/tests/ut/python/ut_filter.py
@@ -20,26 +20,32 @@ import os

 import pytest
 
+
 def is_enable_ge():
     val = os.getenv("ENABLE_GE", "False")
     if val in ('ON', 'on', 'TRUE', 'True', 'true'):
         return True
     return False
 
+
 non_graph_engine = pytest.mark.skipif(is_enable_ge(), reason="Not support running on GE environment")
 
+
 def is_enable_gpu():
     val = os.getenv("ENABLE_GPU", "False")
     if val in ('ON', 'on', 'TRUE', 'True', 'true'):
         return True
     return False
 
+
 run_on_gpu = pytest.mark.skipif(not is_enable_gpu(), reason="Only support running on GPU environment")
 
+
 def is_enable_onnxruntime():
     val = os.getenv("ENABLE_ONNXRUNTIME", "False")
     if val in ('ON', 'on', 'TRUE', 'True', 'true'):
         return True
     return False
 
+
 run_on_onnxruntime = pytest.mark.skipif(not is_enable_onnxruntime(), reason="Only support running on onnxruntime")
diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py
index f331bedf67..e44d0f25d6 100644
--- a/tests/ut/python/utils/test_initializer.py
+++ b/tests/ut/python/utils/test_initializer.py
@@ -240,6 +240,7 @@ class Net(nn.Cell):
         z = self.add(z, self.t2)
         return z
 
+
 def test_weight_shape():
     context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
     a = np.arange(20).reshape(5, 4)