From d40e89b1bc077b1b0531d76c6bd2f91caed6b100 Mon Sep 17 00:00:00 2001
From: jinyaohui
Date: Mon, 11 May 2020 20:59:30 +0800
Subject: [PATCH] clean pylint warning in st/ops/ascend

---
 tests/st/ops/ascend/test_add.py | 11 +-
 tests/st/ops/ascend/test_addn.py | 6 +-
 .../ascend/test_aicpu_ops/test_expand_dims.py | 147 ++++++++++--------
 .../ops/ascend/test_aicpu_ops/test_flatten.py | 125 ++++++++-------
 .../ascend/test_aicpu_ops/test_is_finite.py | 147 ++++++++++--------
 .../ops/ascend/test_aicpu_ops/test_reshape.py | 147 ++++++++++--------
 .../ops/ascend/test_aicpu_ops/test_squeeze.py | 147 ++++++++++--------
 tests/st/ops/ascend/test_apply_momentum.py | 15 +-
 tests/st/ops/ascend/test_biasAddGrad.py | 25 +--
 tests/st/ops/ascend/test_bias_add_grad.py | 5 +-
 tests/st/ops/ascend/test_conv.py | 23 +--
 tests/st/ops/ascend/test_conv2dGradFilter.py | 45 +++---
 tests/st/ops/ascend/test_conv_grad.py | 20 ++-
 tests/st/ops/ascend/test_dense.py | 4 +
 tests/st/ops/ascend/test_dense_grad.py | 4 +
 tests/st/ops/ascend/test_drop_out_gen_mask.py | 1 +
 tests/st/ops/ascend/test_full_connection.py | 1 +
 tests/st/ops/ascend/test_fused_batchnorm.py | 6 +-
 .../ops/ascend/test_fused_batchnorm_grad.py | 10 +-
 tests/st/ops/ascend/test_image_gradients.py | 40 ++---
 tests/st/ops/ascend/test_matmul.py | 9 +-
 tests/st/ops/ascend/test_maxpool.py | 5 +-
 tests/st/ops/ascend/test_maxpool_grad.py | 1 +
 .../ascend/test_maxpool_with_argmax_grad.py | 8 +-
 tests/st/ops/ascend/test_relu.py | 6 +-
 tests/st/ops/ascend/test_relu_grad.py | 8 +-
 tests/st/ops/ascend/test_reshape.py | 24 +--
 tests/st/ops/ascend/test_simplemean.py | 6 +-
 tests/st/ops/ascend/test_simplemean_grad.py | 8 +-
 ...est_sparseSoftmaxCrossEntropyWithLogits.py | 8 +-
 ..._softmax_cross_entropy_with_logits_grad.py | 1 +
 .../ops/ascend/test_tbe_ops/test_AssignAdd.py | 6 +-
 .../ops/ascend/test_tbe_ops/test_AssignSub.py | 6 +-
 .../ascend/test_tbe_ops/test_ReduceMean.py | 5 +
 tests/st/ops/ascend/test_tbe_ops/test_add.py | 6 +-
 tests/st/ops/ascend/test_tbe_ops/test_addn.py | 6 +-
 .../ascend/test_tbe_ops/test_apply_adam.py | 19 +--
 .../test_tbe_ops/test_apply_momentum.py | 13 +-
 .../ascend/test_tbe_ops/test_batchmatmul.py | 5 +
 .../ops/ascend/test_tbe_ops/test_batchnorm.py | 4 +-
 .../test_tbe_ops/test_batchnorm_grad.py | 9 +-
 .../ops/ascend/test_tbe_ops/test_bias_add.py | 3 +-
 .../ascend/test_tbe_ops/test_bias_add_grad.py | 4 +
 .../st/ops/ascend/test_tbe_ops/test_concat.py | 5 +-
 tests/st/ops/ascend/test_tbe_ops/test_conv.py | 21 ++-
 .../test_conv2d_backprop_filter.py | 17 +-
 .../test_conv2d_backprop_input.py | 27 ++--
 .../test_tbe_ops/test_dropout_do_mask.py | 5 +-
 tests/st/ops/ascend/test_tbe_ops/test_gelu.py | 3 +
 .../test_tbe_ops/test_gelu_grad_sens.py | 5 +-
 .../ops/ascend/test_tbe_ops/test_greater.py | 6 +-
 .../ops/ascend/test_tbe_ops/test_layernorm.py | 6 +-
 .../test_tbe_ops/test_layernorm_grad.py | 7 +-
 tests/st/ops/ascend/test_tbe_ops/test_less.py | 10 +-
 .../ascend/test_tbe_ops/test_less_equal.py | 10 +-
 .../ascend/test_tbe_ops/test_logical_and.py | 7 +-
 .../ascend/test_tbe_ops/test_logical_not.py | 6 +-
 .../ascend/test_tbe_ops/test_logical_or.py | 7 +-
 .../st/ops/ascend/test_tbe_ops/test_matmul.py | 7 +-
 .../ascend/test_tbe_ops/test_matmul_failed.py | 9 +-
 .../ops/ascend/test_tbe_ops/test_maximum.py | 10 +-
 .../ascend/test_tbe_ops/test_maximum_grad.py | 6 +-
 .../ops/ascend/test_tbe_ops/test_maxpool.py | 4 +-
 .../ascend/test_tbe_ops/test_maxpool_grad.py | 1 +
 .../ops/ascend/test_tbe_ops/test_minimum.py | 10 +-
 .../ascend/test_tbe_ops/test_minimum_grad.py | 5 +-
 tests/st/ops/ascend/test_tbe_ops/test_mul.py | 9 +-
 .../test_npu_alloc_float_status.py | 5 +-
 .../test_npu_clear_float_status.py | 6 +-
 .../test_tbe_ops/test_npu_get_float_status.py | 6 +-
 tests/st/ops/ascend/test_tbe_ops/test_pad.py | 5 +-
 tests/st/ops/ascend/test_tbe_ops/test_pow.py | 10 +-
 .../ops/ascend/test_tbe_ops/test_realdiv.py | 9 +-
 .../ascend/test_tbe_ops/test_reciprocal.py | 6 +-
 tests/st/ops/ascend/test_tbe_ops/test_relu.py | 6 +-
 .../ops/ascend/test_tbe_ops/test_relu_grad.py | 8 +-
 .../ascend/test_tbe_ops/test_relu_v2_grad.py | 6 +-
 .../test_resize_nearest_neighbor.py | 3 +
 .../test_resize_nearest_neighbor_grad.py | 3 +-
 .../ascend/test_tbe_ops/test_scatter_nd.py | 4 +-
 .../st/ops/ascend/test_tbe_ops/test_select.py | 19 ++-
 .../ops/ascend/test_tbe_ops/test_sigmoid.py | 3 +
 .../test_sigmoid_cross_entropy_with_logits.py | 1 +
 ..._sigmoid_cross_entropy_with_logits_grad.py | 1 +
 .../ascend/test_tbe_ops/test_sigmoid_grad.py | 3 +-
 .../st/ops/ascend/test_tbe_ops/test_slice.py | 14 +-
 .../test_tbe_ops/test_smooth_l1_loss.py | 1 +
 .../test_tbe_ops/test_smooth_l1_loss_grad.py | 1 +
 .../ops/ascend/test_tbe_ops/test_softmax.py | 7 +-
 .../test_softmax_cross_entropy_with_logits.py | 3 +-
 .../st/ops/ascend/test_tbe_ops/test_split.py | 6 +-
 tests/st/ops/ascend/test_tbe_ops/test_sqrt.py | 7 +-
 .../st/ops/ascend/test_tbe_ops/test_square.py | 7 +-
 .../ascend/test_tbe_ops/test_stridedslice.py | 18 ++-
 .../test_tbe_ops/test_stridedslice_grad.py | 5 +
 tests/st/ops/ascend/test_tbe_ops/test_sub.py | 10 +-
 tests/st/ops/ascend/test_tbe_ops/test_tanh.py | 6 +-
 .../ops/ascend/test_tbe_ops/test_tanh_grad.py | 6 +-
 tests/st/ops/ascend/test_tbe_ops/test_tile.py | 1 +
 tests/st/ops/ascend/test_tbe_ops/test_topk.py | 6 +-
 .../ascend/test_tbe_ops/test_transpose_d.py | 5 +-
 .../test_tbe_ops/test_unsorted_segment_sum.py | 6 +-
 tests/st/ops/ascend/test_tdt_data_ms.py | 3 +-
 103 files changed, 942 insertions(+), 576 deletions(-)

diff --git a/tests/st/ops/ascend/test_add.py b/tests/st/ops/ascend/test_add.py
index 659bb59466..7d908158ce 100644
--- a/tests/st/ops/ascend/test_add.py
+++ b/tests/st/ops/ascend/test_add.py
@@ -20,18 +20,23 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(enable_task_sink=True)
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
-
+
     def construct(self, x, y):
         return self.add(x, y)
 
-x = np.ones([1,3,3,4]).astype(np.float32)
-y = np.ones([1,3,3,4]).astype(np.float32)
+
+x = np.ones([1, 3, 3, 4]).astype(np.float32)
+y = np.ones([1, 3, 3, 4]).astype(np.float32)
+
 
 def test_net():
     add = Net()
diff --git a/tests/st/ops/ascend/test_addn.py b/tests/st/ops/ascend/test_addn.py
index 143dc616d0..82cf82010a 100644
--- a/tests/st/ops/ascend/test_addn.py
+++ b/tests/st/ops/ascend/test_addn.py
@@ -20,15 +20,19 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.add = P.AddN()
-
+
     def construct(self, x, y):
         return self.add((x, y))
 
+
 def test_net():
     x = np.random.randn(1, 3, 3, 4).astype(np.float32)
     y = np.random.randn(1, 3, 3, 4).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py b/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py
index f47159af3b..664fa2c950 100644
--- a/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
 from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.expand_dims = P.ExpandDims()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.expand_dims = P.ExpandDims()
 
-  def construct(self, tensor, dim):
-    return self.expand_dims(tensor, dim)
+    def construct(self, tensor, dim):
+        return self.expand_dims(tensor, dim)
 
 
 def test_net_bool():
-  x = np.random.randn(1, 16, 1, 1).astype(np.bool)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_int8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int8)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
+
 def test_net_uint8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_int16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int16)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_uint16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_int32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int32)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_uint32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_int64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int64)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_uint64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_float16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_float32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float32)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+
 
 def test_net_float64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float64)
-  net = Net()
-  output = net(Tensor(x), -1)
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.expand_dims(x, -1)))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x), -1)
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py b/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py
index 45a336a0e1..b7c24ac41e 100644
--- a/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_flatten.py
@@ -17,83 +17,94 @@ from mindspore.ops import operations as P
 import mindspore.nn as nn
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.flatten = P.Flatten()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.flatten = P.Flatten()
 
-  def construct(self, tensor):
-    return self.flatten(tensor)
+    def construct(self, tensor):
+        return self.flatten(tensor)
 
 
 def test_net_int8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
+
 def test_net_uint8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_int16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_uint16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_int32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_uint32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_int64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_uint64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_float16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
+
 
 def test_net_float32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.flatten()))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.flatten()))
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py b/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py
index 149d48ce04..7de73cd853 100644
--- a/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
 from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.isfinite = P.IsFinite()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.isfinite = P.IsFinite()
 
-  def construct(self, tensor):
-    return self.isfinite(tensor)
+    def construct(self, tensor):
+        return self.isfinite(tensor)
 
 
 def test_net_bool():
-  x = np.random.randn(1, 16, 1, 1).astype(np.bool)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_int8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
+
 def test_net_uint8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_int16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_uint16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_int32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_uint32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_int64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_uint64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_float16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_float32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
+
 
 def test_net_float64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.isfinite(x)))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.isfinite(x)))
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py b/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py
index 7d380fabf8..6839c96e50 100644
--- a/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_reshape.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
 from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.reshape = P.Reshape()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.reshape = P.Reshape()
 
-  def construct(self, tensor):
-    return self.reshape(tensor, (4,4))
+    def construct(self, tensor):
+        return self.reshape(tensor, (4, 4))
 
 
 def test_net_bool():
-  x = np.random.randn(1, 16, 1, 1).astype(np.bool)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_int8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
+
 def test_net_uint8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_int16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_uint16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_int32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_uint32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_int64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_uint64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_float16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_float32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+
 
 def test_net_float64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == np.reshape(x, (4,4))))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py b/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
index 4a17feeff0..58b6a3f07c 100644
--- a/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
@@ -17,97 +17,110 @@ from mindspore.ops import operations as P
 import mindspore.nn as nn
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.squeeze = P.Squeeze()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.squeeze = P.Squeeze()
 
-  def construct(self, tensor):
-    return self.squeeze(tensor)
+    def construct(self, tensor):
+        return self.squeeze(tensor)
 
 
 def test_net_bool():
-  x = np.random.randn(1, 16, 1, 1).astype(np.bool)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_int8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
+
 def test_net_uint8():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_int16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_uint16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_int32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_uint32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_int64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.int64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_uint64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_float16():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_float32():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float32)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
+
 
 def test_net_float64():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float64)
-  net = Net()
-  output = net(Tensor(x))
-  print(output.asnumpy())
-  assert(np.all(output.asnumpy() == x.squeeze()))
-
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x))
+    print(output.asnumpy())
+    assert (np.all(output.asnumpy() == x.squeeze()))
diff --git a/tests/st/ops/ascend/test_apply_momentum.py b/tests/st/ops/ascend/test_apply_momentum.py
index e20c4f4746..2cae2d2ff6 100644
--- a/tests/st/ops/ascend/test_apply_momentum.py
+++ b/tests/st/ops/ascend/test_apply_momentum.py
@@ -20,24 +20,29 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.apply_momentum = P.ApplyMomentum(gradient_scale=1024.0)
         self.variable = Parameter(initializer(
-                    'normal', [2, 3, 3, 4]), name='variable')
+            'normal', [2, 3, 3, 4]), name='variable')
         self.accumulation = Parameter(initializer(
-                    'normal', [2, 3, 3, 4]), name='accumulation')
+            'normal', [2, 3, 3, 4]), name='accumulation')
         self.learning_rate = Parameter(initializer(
-                    'normal', [1, ]), name='learning_rate')
+            'normal', [1, ]), name='learning_rate')
        
 self.gradient = Parameter(initializer(
-                    'normal', [2, 3, 3, 4]), name='gradient')
+            'normal', [2, 3, 3, 4]), name='gradient')
         self.momentum = Parameter(initializer(
-                    'normal', [1, ]), name='momentum')
+            'normal', [1, ]), name='momentum')
+
     def construct(self):
         return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum)
 
+
 def test_net():
     apply_momentum = Net()
     output = apply_momentum()
diff --git a/tests/st/ops/ascend/test_biasAddGrad.py b/tests/st/ops/ascend/test_biasAddGrad.py
index f2e8f7a9bc..83e51c71e5 100644
--- a/tests/st/ops/ascend/test_biasAddGrad.py
+++ b/tests/st/ops/ascend/test_biasAddGrad.py
@@ -21,22 +21,25 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.bias_add_grad = G.BiasAddGrad()
-    #self.dout = Parameter(initializer(
-    #'normal', [2, 3, 3, 4]), name='dout')
+    def __init__(self):
+        super(Net, self).__init__()
+        self.bias_add_grad = G.BiasAddGrad()
+        # self.dout = Parameter(initializer(
+        # 'normal', [2, 3, 3, 4]), name='dout')
+
+    @ms_function
+    def construct(self, dout):
+        return self.bias_add_grad(dout)
 
-  @ms_function
-  def construct(self, dout):
-    return self.bias_add_grad(dout)
 
-dout = np.ones([2,3,4,4]).astype(np.float32)
+dout = np.ones([2, 3, 4, 4]).astype(np.float32)
 bias_add_grad = Net()
 output = bias_add_grad(Tensor(dout))
-expect_output = np.array([32.,32.,32.]).astype(np.float32)
-assert np.all(output.asnumpy()==expect_output), "bias_add_grad execute failed, please check current code commit"
+expect_output = np.array([32., 32., 32.]).astype(np.float32)
+assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
 print(output.asnumpy())
diff --git a/tests/st/ops/ascend/test_bias_add_grad.py b/tests/st/ops/ascend/test_bias_add_grad.py
index c6a51d8b3b..85e27b7600 100644
--- a/tests/st/ops/ascend/test_bias_add_grad.py
+++ b/tests/st/ops/ascend/test_bias_add_grad.py
@@ -21,17 +21,20 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.bias_add_grad = G.BiasAddGrad()
-
 
     @ms_function
     def construct(self, dout):
         return self.bias_add_grad(dout)
 
+
 def test_net():
     dout = np.random.rand(1, 1001).astype(np.float32)
     bias_add_grad = Net()
diff --git a/tests/st/ops/ascend/test_conv.py b/tests/st/ops/ascend/test_conv.py
index f0f161da38..3984cb33f7 100644
--- a/tests/st/ops/ascend/test_conv.py
+++ b/tests/st/ops/ascend/test_conv.py
@@ -20,32 +20,33 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         out_channel = 64
         kernel_size = 7
         self.conv = P.Conv2D(out_channel,
-                    kernel_size,
-                    mode=1,
-                    pad_mode="valid",
-                    pad=0,
-                    stride=1,
-                    dilation=1,
-                    group=1)
+                             kernel_size,
+                             mode=1,
+                             pad_mode="valid",
+                             pad=0,
+                             stride=1,
+                             dilation=1,
+                             group=1)
         self.w = Parameter(initializer(
-                    'normal', [64, 3, 7, 7]), name='w')
-
+            'normal', [64, 3, 7, 7]), name='w')
 
     @ms_function
     def construct(self, x):
         return self.conv(x, self.w)
 
-def test_net():
-    x = np.random.randn(32,3,224,224).astype(np.float32)
+
+def test_net():
+    x = np.random.randn(32, 3, 224, 224).astype(np.float32)
     conv = Net()
     output = conv(Tensor(x))
     print(output.asnumpy())
diff --git a/tests/st/ops/ascend/test_conv2dGradFilter.py b/tests/st/ops/ascend/test_conv2dGradFilter.py
index 73993c2ee5..945533426d 100644
--- a/tests/st/ops/ascend/test_conv2dGradFilter.py
+++ b/tests/st/ops/ascend/test_conv2dGradFilter.py
@@ -21,37 +21,40 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.conv2d_grad = G.Conv2DBackpropFilter(4,1)
-    yt = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
-    self.y = Parameter(yt, name='y')
-    self.get_shape = P.Shape()
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv2d_grad = G.Conv2DBackpropFilter(4, 1)
+        yt = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
+        self.y = Parameter(yt, name='y')
+        self.get_shape = P.Shape()
+
+    @ms_function
+    def construct(self, x, out):
+        return self.conv2d_grad(out, x, self.get_shape(self.y))
 
-  @ms_function
-  def construct(self, x, out):
-    return self.conv2d_grad(out, x, self.get_shape(self.y))
 
 x = Tensor(np.array([[[
-                      [3, 0, 1, 2, 7, 4],
-                      [1, 5, 8, 9, 3, 1],
-                      [2, 7, 2, 5, 1, 3],
-                      [0, 1, 3, 1, 7, 8],
-                      [4, 2, 1, 6, 2, 8],
-                      [2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
+    [3, 0, 1, 2, 7, 4],
+    [1, 5, 8, 9, 3, 1],
+    [2, 7, 2, 5, 1, 3],
+    [0, 1, 3, 1, 7, 8],
+    [4, 2, 1, 6, 2, 8],
+    [2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
 
 out = Tensor(np.array([[[
-                        [ -5, -4, 0, 8],
-                        [-10, -2, 2, 3],
-                        [ 0, -2, -4, -7],
-                        [ -3, -2, -3, -16]]]]).astype(np.float32))
+    [-5, -4, 0, 8],
+    [-10, -2, 2, 3],
+    [0, -2, -4, -7],
+    [-3, -2, -3, -16]]]]).astype(np.float32))
 
 operator = Net()
 output = operator(x, out)
-expect_out = np.array([[[[ -60., -142., -265.],[-104., -211., -322.],[-102., -144., -248.]]]]).astype(np.float32)
+expect_out = np.array([[[[-60., -142., -265.], [-104., -211., -322.], [-102., -144., -248.]]]]).astype(np.float32)
 print(output.asnumpy())
 print(expect_out)
-assert np.all(output.asnumpy()==expect_out), "conv2d_grad execute failed, please check current code commit"
+assert np.all(output.asnumpy() == expect_out), "conv2d_grad execute failed, please check current code commit"
diff --git a/tests/st/ops/ascend/test_conv_grad.py b/tests/st/ops/ascend/test_conv_grad.py
index b693e7fe80..0d6de26afc 100644
--- a/tests/st/ops/ascend/test_conv_grad.py
+++ b/tests/st/ops/ascend/test_conv_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
 
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,26 +35,28 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         out_channel = 512
         kernel_size = 2048
         self.conv = P.Conv2D(out_channel,
-                    (kernel_size, kernel_size),
-                    mode=1,
-                    pad_mode="same",
-                    pad=3,
-                    stride=2,
-                    dilation=1,
-                    group=1)
+                             (kernel_size, kernel_size),
+                             mode=1,
+                             pad_mode="same",
+                             pad=3,
+                             stride=2,
+                             dilation=1,
+                             group=1)
         self.w = Parameter(initializer(
-                    'normal', [512, 2048, 1, 1]), name='w')
+            'normal', [512, 2048, 1, 1]), name='w')
 
     @ms_function
     def construct(self, x):
         return self.conv(x, self.w)
 
+
 def test_net():
     x = np.ones([32, 2048, 7, 7]).astype(np.float32)
     sens = np.ones([32, 512, 7, 7]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_dense.py b/tests/st/ops/ascend/test_dense.py
index 2866ba9242..c3652237ce 100644
--- a/tests/st/ops/ascend/test_dense.py
+++ b/tests/st/ops/ascend/test_dense.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -30,6 +33,7 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.dense(x)
 
+
 def test_net():
     x = np.random.randn(32, 2048).astype(np.float32)
     net = Net()
diff --git a/tests/st/ops/ascend/test_dense_grad.py b/tests/st/ops/ascend/test_dense_grad.py
index a5ee37842e..071a2e8773 100644
--- a/tests/st/ops/ascend/test_dense_grad.py
+++ b/tests/st/ops/ascend/test_dense_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
 
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -41,6 +44,7 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.dense(x)
 
+
 def test_net():
     x = np.random.randn(32, 2048).astype(np.float32)
     sens = np.random.randn(32, 1001).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_drop_out_gen_mask.py b/tests/st/ops/ascend/test_drop_out_gen_mask.py
index ce7ebbfbe0..3d3e2ca237 100644
--- a/tests/st/ops/ascend/test_drop_out_gen_mask.py
+++ b/tests/st/ops/ascend/test_drop_out_gen_mask.py
@@ -17,6 +17,7 @@ from mindspore.ops import operations as P
 import mindspore.nn as nn
 import numpy as np
 import mindspore.context as context
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
diff --git a/tests/st/ops/ascend/test_full_connection.py b/tests/st/ops/ascend/test_full_connection.py
index d6aec19aa7..534c5d8088 100644
--- a/tests/st/ops/ascend/test_full_connection.py
+++ b/tests/st/ops/ascend/test_full_connection.py
@@ -21,6 +21,7 @@ import mindspore.context as context
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
diff --git a/tests/st/ops/ascend/test_fused_batchnorm.py b/tests/st/ops/ascend/test_fused_batchnorm.py
index 5d02e716b3..a5248203eb 100644
--- a/tests/st/ops/ascend/test_fused_batchnorm.py
+++ b/tests/st/ops/ascend/test_fused_batchnorm.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -35,7 +38,7 @@ class Net(nn.Cell):
 
 
 def test_net():
-    x = np.random.randn(1,64,112,112).astype(np.float32)
+    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
     # mean = np.random.randn(1,16,1,1).astype(np.float32)
     # variance = np.random.randn(1,16,1,1).astype(np.float32)
     fusedBn = Net()
@@ -45,4 +48,3 @@ def test_net():
 
     print("***********output y*********")
     print(output.asnumpy())
-
diff --git a/tests/st/ops/ascend/test_fused_batchnorm_grad.py b/tests/st/ops/ascend/test_fused_batchnorm_grad.py
index b61003b30e..3d13bd8b54 100644
--- a/tests/st/ops/ascend/test_fused_batchnorm_grad.py
+++ b/tests/st/ops/ascend/test_fused_batchnorm_grad.py
@@ -21,8 +21,11 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
-#context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+# context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_target="Ascend")
+
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,6 +36,7 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -47,8 +51,8 @@ class Net(nn.Cell):
 
 def test_net():
-    x = np.random.randn(1,64,112,112).astype(np.float32)
-    sens = np.random.randn(1,64,112,112).astype(np.float32)
+    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
+    sens = np.random.randn(1, 64, 112, 112).astype(np.float32)
     net = Grad(Net())
     output = net(Tensor(x), Tensor(sens))
     print("***********x*********")
diff --git a/tests/st/ops/ascend/test_image_gradients.py b/tests/st/ops/ascend/test_image_gradients.py
index ea385158c9..8d1fbd6ac2 100644
--- a/tests/st/ops/ascend/test_image_gradients.py
+++ b/tests/st/ops/ascend/test_image_gradients.py
@@ -20,6 +20,8 @@ from mindspore import Tensor
 from mindspore.common.api import ms_function
 
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -31,32 +33,32 @@ class Net(nn.Cell):
 
 def test_image_gradients():
-    image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
-    expected_dy = np.array([[[[2,2],[0,0]]]]).astype(np.int32)
-    expected_dx = np.array([[[[1,0],[1,0]]]]).astype(np.int32)
+    image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32)
+    expected_dy = np.array([[[[2, 2], [0, 0]]]]).astype(np.int32)
+    expected_dx = np.array([[[[1, 0], [1, 0]]]]).astype(np.int32)
     net = Net()
     dy, dx = net(image)
-    assert np.any(dx.asnumpy()-expected_dx) == False
-    assert np.any(dy.asnumpy()-expected_dy) == False
+    assert np.any(dx.asnumpy() - expected_dx) == False
+    assert np.any(dy.asnumpy() - expected_dy) == False
 
 
 def test_image_gradients_multi_channel_depth():
     # 4 x 2 x 2 x 2
     dtype = mstype.int32
-    image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]],
-                             [[[3,5],[7,9]], [[11,13],[15,17]]],
-                             [[[5,10],[15,20]], [[25,30],[35,40]]],
-                             [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype)
-    expected_dy = Tensor(np.array([[[[2,2],[0,0]], [[2,2],[0,0]]],
-                                   [[[4,4],[0,0]], [[4,4],[0,0]]],
-                                   [[[10,10],[0,0]], [[10,10],[0,0]]],
-                                   [[[20,20],[0,0]], [[20,20],[0,0]]]]), dtype=dtype)
-    expected_dx = Tensor(np.array([[[[1,0],[1,0]], [[1,0],[1,0]]],
-                                   [[[2,0],[2,0]], [[2,0],[2,0]]],
-                                   [[[5,0],[5,0]], [[5,0],[5,0]]],
-                                   [[[10,0],[10,0]], [[10,0],[10,0]]]]), dtype=dtype)
+    image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+                             [[[3, 5], [7, 9]], [[11, 13], [15, 17]]],
+                             [[[5, 10], [15, 20]], [[25, 30], [35, 40]]],
+                             [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype)
+    expected_dy = Tensor(np.array([[[[2, 2], [0, 0]], [[2, 2], [0, 0]]],
+                                   [[[4, 4], [0, 0]], [[4, 4], [0, 0]]],
+                                   [[[10, 10], [0, 0]], [[10, 10], [0, 0]]],
+                                   [[[20, 20], [0, 0]], [[20, 20], [0, 0]]]]), dtype=dtype)
+    expected_dx = Tensor(np.array([[[[1, 0], [1, 0]], [[1, 0], [1, 0]]],
+                                   [[[2, 0], [2, 0]], [[2, 0], [2, 0]]],
+                                   [[[5, 0], [5, 0]], [[5, 0], [5, 0]]],
+                                   [[[10, 0], [10, 0]], [[10, 0], [10, 0]]]]), dtype=dtype)
     net = Net()
     dy, dx = net(image)
-    assert np.any(dx.asnumpy()-expected_dx.asnumpy()) == False
-    assert np.any(dy.asnumpy()-expected_dy.asnumpy()) == False
+    assert np.any(dx.asnumpy() - expected_dx.asnumpy()) == False
+    assert np.any(dy.asnumpy() - expected_dy.asnumpy()) == False
diff --git a/tests/st/ops/ascend/test_matmul.py b/tests/st/ops/ascend/test_matmul.py
index c5dfc13af1..01701f5285 100644
--- a/tests/st/ops/ascend/test_matmul.py
+++ b/tests/st/ops/ascend/test_matmul.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -30,8 +33,10 @@ class Net(nn.Cell):
     def construct(self, x1, x2):
         return self.matmul(x1, x2)
 
-x1 = np.random.randn(1,3).astype(np.float32)
-x2 = np.random.randn(3,4).astype(np.float32)
+
+x1 = np.random.randn(1, 3).astype(np.float32)
+x2 = np.random.randn(3, 4).astype(np.float32)
+
 
 def test_net():
     matmul = Net()
diff --git a/tests/st/ops/ascend/test_maxpool.py b/tests/st/ops/ascend/test_maxpool.py
index 3b9ecc29d8..da1f2b9cae 100644
--- a/tests/st/ops/ascend/test_maxpool.py
+++ b/tests/st/ops/ascend/test_maxpool.py
@@ -20,12 +20,13 @@ import numpy as np
 import mindspore.context as context
 
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.maxpool = P.MaxPool(pad_mode="SAME", window=3, stride=2)
-
 
     @ms_function
     def construct(self, x):
         output = self.maxpool(x)
@@ -33,7 +34,7 @@ class Net(nn.Cell):
 
 def test_net():
-    x = np.random.randn(32,64,112,112).astype(np.float32)
+    x = np.random.randn(32, 64, 112, 112).astype(np.float32)
     maxpool = Net()
     output = maxpool(Tensor(x))
     print(output.asnumpy())
diff --git a/tests/st/ops/ascend/test_maxpool_grad.py b/tests/st/ops/ascend/test_maxpool_grad.py
index e3d845707c..402d6f719b 100644
--- a/tests/st/ops/ascend/test_maxpool_grad.py
+++ b/tests/st/ops/ascend/test_maxpool_grad.py
@@ -19,6 +19,7 @@ from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py
index 3bbc835c1b..180dc088a8 100644
--- a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py
+++ b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
 
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -43,8 +46,9 @@ class Net(nn.Cell):
 
     @ms_function
     def construct(self, x):
-      output = self.maxpool(x)
-      return output[0]
+        output = self.maxpool(x)
+        return output[0]
+
 
 def test_net():
     x = np.random.randn(32, 64, 112, 112).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_relu.py b/tests/st/ops/ascend/test_relu.py
index 2e8aa46c24..1b6ed298d3 100644
--- a/tests/st/ops/ascend/test_relu.py
+++ b/tests/st/ops/ascend/test_relu.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -30,8 +33,9 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.relu(x)
 
+
 def test_net():
-    x = np.random.randn(2,3,3,4).astype(np.float32)
+    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
     relu = Net()
     output = relu(Tensor(x))
     print(x)
diff --git a/tests/st/ops/ascend/test_relu_grad.py b/tests/st/ops/ascend/test_relu_grad.py
index dd13544b80..5839218b42 100644
--- a/tests/st/ops/ascend/test_relu_grad.py
+++ b/tests/st/ops/ascend/test_relu_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
 
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -41,9 +44,10 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.relu(x)
 
+
 def test_net():
-    x = np.random.randn(2,3,3,4).astype(np.float32)
-    sens = np.random.randn(2,3,3,4).astype(np.float32)
+    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
+    sens = np.random.randn(2, 3, 3, 4).astype(np.float32)
     net = Grad(Net())
     output = net(Tensor(x), Tensor(sens))
     print(len(output))
diff --git a/tests/st/ops/ascend/test_reshape.py b/tests/st/ops/ascend/test_reshape.py
index c0f9cf4d9c..66f2dd743b 100644
--- a/tests/st/ops/ascend/test_reshape.py
+++ b/tests/st/ops/ascend/test_reshape.py
@@ -18,18 +18,22 @@ import mindspore.nn as nn
 from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
-  def __init__(self):
-    super(Net, self).__init__()
-    self.reshape = P.Reshape()
-  @ms_function
-  def construct(self, tensor):
-    return self.reshape(tensor, (1,16))
+    def __init__(self):
+        super(Net, self).__init__()
+        self.reshape = P.Reshape()
+
+    @ms_function
+    def construct(self, tensor):
+        return self.reshape(tensor, (1, 16))
 
 
 def test_net():
-  x = np.random.randn(1, 16, 1, 1).astype(np.float16)
-  reshape = Net()
-  output = reshape(Tensor(x))
-  print(output.asnumpy())
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    reshape = Net()
+    output = reshape(Tensor(x))
+    print(output.asnumpy())
diff --git a/tests/st/ops/ascend/test_simplemean.py b/tests/st/ops/ascend/test_simplemean.py
index dbc7a6add9..b1280aec72 100644
--- a/tests/st/ops/ascend/test_simplemean.py
+++ b/tests/st/ops/ascend/test_simplemean.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -29,7 +32,8 @@ class Net(nn.Cell):
     @ms_function
     def construct(self, x):
         return self.simplemean(x, (-2, -1))
-
+
+
 def test_net():
     x = np.random.randn(32, 2048, 7, 7).astype(np.float32)
     simplemean = Net()
diff --git a/tests/st/ops/ascend/test_simplemean_grad.py b/tests/st/ops/ascend/test_simplemean_grad.py
index c5b37eb843..bdffa32e66 100644
--- a/tests/st/ops/ascend/test_simplemean_grad.py
+++ b/tests/st/ops/ascend/test_simplemean_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+
 context.set_context(device_target="Ascend")
 
+
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
     def construct(self, input, output_grad):
         return self.grad(self.network)(input, output_grad)
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -41,9 +44,10 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.simplemean(x, (-2, -1))
 
+
 def test_net():
-    x = np.random.randn(32,2048,7,7).astype(np.float32)
-    sens = np.random.randn(32,2048, 1, 1).astype(np.float32)
+    x = np.random.randn(32, 2048, 7, 7).astype(np.float32)
+    sens = np.random.randn(32, 2048, 1, 1).astype(np.float32)
     net = Grad(Net())
     output = net(Tensor(x), Tensor(sens))
     print(output.asnumpy())
diff --git a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
index 5f143ef037..f4f801a685 100644
--- a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
+++ b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
@@ -18,6 +18,7 @@ import mindspore.nn as nn
 from mindspore.common.api import ms_function
 import numpy as np
 import mindspore.context as context
+
 context.set_context(device_target="Ascend")
 
 
@@ -30,9 +31,10 @@ class Net(nn.Cell):
     def construct(self, features, labels):
         return self.SparseSoftmaxCrossEntropyWithLogits(features, labels)
 
+
 def np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, logits_dtype):
     num_class = logits_shape[1]
-    labels = np.random.randint(low=0, high=num_class - 1, size=labels_shape).astype(np.int32) 
+    labels = np.random.randint(low=0, high=num_class - 1, size=labels_shape).astype(np.int32)
     logits = np.random.rand(*logits_shape).astype(logits_dtype)
     features = logits
     features_reshape = np.reshape(features, [-1, num_class])
@@ -48,7 +50,7 @@ def np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, logi
     loss = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
     bp_res = np.reshape(bp, features.shape)
     loss_res = np.reshape(loss, labels.shape)
-    loss_res = np.sum(loss_res, axis=0)/loss_res.shape[0]
+    loss_res = np.sum(loss_res, axis=0) / loss_res.shape[0]
     return labels, logits, loss_res, bp_res
 
 
@@ -65,4 +67,6 @@ def test_net():
     print(loss_me.asnumpy().flatten())
     print("-------------------------")
     print(expect)
+
+
 test_net()
diff --git a/tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py b/tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py
index d36873d426..fd5b31564f 100644
--- a/tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py
+++ b/tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py
@@ -21,6 +21,7 @@ import mindspore.context as context
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self, is_grad=False):
         super(Net, self).__init__()
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py b/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py
index 59d8a0cade..f8e14a5b67 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py
@@ -20,11 +20,13 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
 
 class Net(nn.Cell):
     """Net definition"""
+
     def __init__(self):
         super(Net, self).__init__()
         self.AssignAdd = P.AssignAdd()
@@ -39,8 +41,8 @@ class Net(nn.Cell):
 def test_net():
     """test AssignAdd"""
     net = Net()
-    x = Tensor(np.ones([1]).astype(np.float32)*100)
+    x = Tensor(np.ones([1]).astype(np.float32) * 100)
     print("MyPrintResult dataX:", x)
     result = net(x)
-    print("MyPrintResult data::", result.asnumpy())
\ No newline at end of file
+    print("MyPrintResult data::", result.asnumpy())
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py b/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py
index ad37f8c53f..fd73325401 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py
@@ -20,11 +20,13 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
 
 class Net(nn.Cell):
"""Net definition""" + def __init__(self): super(Net, self).__init__() self.AssignSub = P.AssignSub() @@ -39,8 +41,8 @@ class Net(nn.Cell): def test_net(): """test AssignSub""" net = Net() - x = Tensor(np.ones([1]).astype(np.int32)*100) + x = Tensor(np.ones([1]).astype(np.int32) * 100) print("MyPrintResult dataX:", x) result = net(x) - print("MyPrintResult data::", result.asnumpy()) \ No newline at end of file + print("MyPrintResult data::", result.asnumpy()) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py b/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py index 7975ef190b..91a50e3f28 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py @@ -20,7 +20,10 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self, keep_dims, axis): super(Net, self).__init__() @@ -31,8 +34,10 @@ class Net(nn.Cell): def construct(self, inputs): return self.reduce_mean(inputs, self.axis) + x1 = np.random.randn(64).astype(np.float32) + def test_net(): keepdims = False axis = -1 diff --git a/tests/st/ops/ascend/test_tbe_ops/test_add.py b/tests/st/ops/ascend/test_tbe_ops/test_add.py index dd8515868e..b58a7859ab 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_add.py @@ -21,6 +21,7 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -29,8 +30,9 @@ class Net(nn.Cell): def construct(self, x, y): return self.add(x, y) -x = np.random.randn(1,3,3,4).astype(np.float32) -y = np.random.randn(1,3,3,4).astype(np.float32) + +x = np.random.randn(1, 3, 3, 4).astype(np.float32) +y = np.random.randn(1, 3, 3, 4).astype(np.float32) def test_net(): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_addn.py b/tests/st/ops/ascend/test_tbe_ops/test_addn.py index 4defa7d629..1765d50fd1 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_addn.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_addn.py @@ -20,15 +20,19 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.add = P.AddN() - + def construct(self, x, y): return self.add((x, y)) + def test_net(): x = np.random.randn(1, 3, 3, 4).astype(np.float32) y = np.random.randn(1, 3, 3, 4).astype(np.float32) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py b/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py index f54de92144..3713de2c32 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py @@ -19,6 +19,7 @@ from mindspore.nn import Dense, SoftmaxCrossEntropyWithLogits from mindspore.nn import TrainOneStepCell, WithLossCell import mindspore.context as context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", impl_type="tbe") context.set_context(enable_task_sink=True) @@ -44,16 +45,16 @@ class Adam: label = Tensor(label_np_onehot) ms_dense = Dense(in_channels=self.input_channels, - out_channels=self.output_channels, - weight_init=weight_np, - bias_init=bias, 
has_bias=True) + out_channels=self.output_channels, + weight_init=weight_np, + bias_init=bias, has_bias=True) criterion = SoftmaxCrossEntropyWithLogits() optimizer = nn.Adam(ms_dense.trainable_params(), - learning_rate=1e-3, - beta1=0.9, beta2=0.999, eps=self.epsilon, - use_locking=False, - use_nesterov=False, weight_decay=0.0, - loss_scale=1.0) + learning_rate=1e-3, + beta1=0.9, beta2=0.999, eps=self.epsilon, + use_locking=False, + use_nesterov=False, weight_decay=0.0, + loss_scale=1.0) net_with_criterion = WithLossCell(ms_dense, criterion) train_network = TrainOneStepCell(net_with_criterion, optimizer) @@ -68,5 +69,5 @@ class Adam: def test_adam(): - fact = Adam(batch_num=8, input_channels=20, output_channels=5, epoch=5, lr=0.1, weight_decay=0.0, epsilon= 1e-8) + fact = Adam(batch_num=8, input_channels=20, output_channels=5, epoch=5, lr=0.1, weight_decay=0.0, epsilon=1e-8) fact.train_mindspore_impl() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py b/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py index f1d9abdaf4..0db4d9e972 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py @@ -21,23 +21,26 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.apply_momentum = P.ApplyMomentum(gradient_scale=1024.0) self.variable = Parameter(initializer( - 'normal', [2, 3, 3, 4]), name='variable') + 'normal', [2, 3, 3, 4]), name='variable') self.accumulation = Parameter(initializer( - 'normal', [2, 3, 3, 4]), name='accumulation') + 'normal', [2, 3, 3, 4]), name='accumulation') self.learning_rate = Parameter(initializer( - 'normal', [1, ]), name='learning_rate') + 'normal', [1, ]), name='learning_rate') self.gradient = Parameter(initializer( - 'normal', [2, 3, 3, 4]), name='gradient') + 'normal', [2, 3, 3, 4]), name='gradient') self.momentum = Parameter(initializer( - 'normal', [1, ]), name='momentum') + 'normal', [1, ]), name='momentum') + def construct(self): return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum) + def test_net(): context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") apply_momentum = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py b/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py index 4d9e8de402..72fb57a0fa 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py @@ -19,8 +19,10 @@ from mindspore.nn import Cell from mindspore.train.model import Model import pytest from mindspore import context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Net(Cell): def __init__(self): super(Net, self).__init__() @@ -30,17 +32,20 @@ class Net(Cell): x = self.batchmatmul(inputa, inputb) return x + def tf_me_batchmatmul(inputa, inputb): net = Net() net.set_train() model = Model(net) out_me = model.predict(Tensor(inputa), Tensor(inputb)) + def test_batchmatmul_normal_shape1(): inputa = np.random.randn(128, 16, 128).astype(np.float32) inputb = np.random.randn(128, 128, 64).astype(np.float32) tf_me_batchmatmul(Tensor(inputa), Tensor(inputb)) + def test_batchmatmul_normal_shape2(): inputa = np.random.randn(1, 16, 128, 128).astype(np.float32) inputb = np.random.randn(1, 16, 128, 64).astype(np.float32) diff --git 
a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py index 78610f3f87..2415331050 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py @@ -21,6 +21,7 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -35,7 +36,7 @@ class Net(nn.Cell): def test_net(): - x = np.random.randn(1,64,112,112).astype(np.float32) + x = np.random.randn(1, 64, 112, 112).astype(np.float32) # mean = np.random.randn(1,16,1,1).astype(np.float32) # variance = np.random.randn(1,16,1,1).astype(np.float32) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -55,4 +56,3 @@ def test_net(): print("***********output y*********") print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py index a893a3a8d3..bd4bdcdf48 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py @@ -21,8 +21,11 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops.composite import GradOperation -#context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +# context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_context(device_target="Ascend") + + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -48,7 +51,7 @@ class Net(nn.Cell): def test_net(): - x = np.random.randn(1,64,112,112).astype(np.float32) - sens = np.random.randn(1,64,112,112).astype(np.float32) + x = np.random.randn(1, 64, 112, 112).astype(np.float32) + sens = np.random.randn(1, 64, 112, 112).astype(np.float32) net = Grad(Net()) output = net(Tensor(x), Tensor(sens)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py index 8dbea18b95..94adfcb7e6 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_bias_add.py @@ -20,11 +20,13 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): """Net definition""" + def __init__(self, output_channels, bias_init='zeros', @@ -51,4 +53,3 @@ def test_compile(): # enable it when staging function is ready output = net(input_data) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py index 57f0649e7c..cafecb9fd9 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py @@ -21,7 +21,10 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -31,6 +34,7 @@ class Net(nn.Cell): def construct(self, dout): return self.bias_add_grad(dout) + def test_net(): dout = np.random.rand(1, 1001).astype(np.float32) 
bias_add_grad = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_concat.py b/tests/st/ops/ascend/test_tbe_ops/test_concat.py index debc18ab14..72e67f5ee5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_concat.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_concat.py @@ -20,11 +20,12 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): - def __init__( self): + def __init__(self): super(Net, self).__init__() self.cat = P.Concat(axis=1) @@ -46,4 +47,4 @@ def test_net(): print(np.arange(2 * 2).reshape(2, 2)) print(np.arange(2 * 3).reshape(2, 3)) print(output) - assert(output.asnumpy() == expect).all() + assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv.py b/tests/st/ops/ascend/test_tbe_ops/test_conv.py index e6eb880e1e..2cec372b03 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_conv.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_conv.py @@ -21,31 +21,30 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() out_channel = 64 kernel_size = 7 self.conv = P.Conv2D(out_channel, - kernel_size, - mode=1, - pad_mode="valid", - pad=0, - stride=1, - dilation=1, - group=1) + kernel_size, + mode=1, + pad_mode="valid", + pad=0, + stride=1, + dilation=1, + group=1) self.w = Parameter(initializer( - 'normal', [64, 3, 7, 7]), name='w') - + 'normal', [64, 3, 7, 7]), name='w') @ms_function def construct(self, x): return self.conv(x, self.w) - def test_net(): - x = np.random.randn(32,3,224,224).astype(np.float32) + x = np.random.randn(32, 3, 224, 224).astype(np.float32) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") conv = Net() output = conv(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py index 554fb9ab9b..f2137963bb 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py @@ -21,6 +21,7 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target='Ascend') @@ -37,19 +38,21 @@ class Net(nn.Cell): stride=1, dilation=1, group=1) - self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w') + self.w = Parameter( + initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), + name='w') self.x = Parameter(initializer(Tensor(np.array([[[ [3, 0, 1, 2, 7, 4], [1, 5, 8, 9, 3, 1], [2, 7, 2, 5, 1, 3], [0, 1, 3, 1, 7, 8], [4, 2, 1, 6, 2, 8], - [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x') + [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x') self.out = Parameter(initializer(Tensor(np.array([[[ - [ -5, -4, 0, 8], - [-10, -2, 2, 3], - [ 0, -2, -4, -7], - [ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y') + [-5, -4, 0, 8], + [-10, -2, 2, 3], + [0, -2, -4, -7], + [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y') self.get_shape = P.Shape() @ms_function @@ -67,7 +70,7 @@ def 
test_conv2d_backprop_filter(): [-104, -211, -322] [-102, -144, -248]]]] """ - expect = np.array([[[[ -60, -142, -265], + expect = np.array([[[[-60, -142, -265], [-104, -211, -322], [-102, -144, -248]]]]).astype(np.float32) print(output) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py index 961c7fdbe5..1787cc25b4 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py @@ -20,6 +20,7 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target="Ascend") @@ -36,19 +37,21 @@ class Net(nn.Cell): stride=1, dilation=1, group=1) - self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w') + self.w = Parameter( + initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), + name='w') self.x = Parameter(initializer(Tensor(np.array([[[ [3, 0, 1, 2, 7, 4], [1, 5, 8, 9, 3, 1], [2, 7, 2, 5, 1, 3], [0, 1, 3, 1, 7, 8], [4, 2, 1, 6, 2, 8], - [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1,1,6,6]), name='x') + [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)), [1, 1, 6, 6]), name='x') self.out = Parameter(initializer(Tensor(np.array([[[ - [ -5, -4, 0, 8], - [-10, -2, 2, 3], - [ 0, -2, -4, -7], - [ -3, -2, -3, -16]]]]).astype(np.float32)),[1,1,4,4]), name='y') + [-5, -4, 0, 8], + [-10, -2, 2, 3], + [0, -2, -4, -7], + [-3, -2, -3, -16]]]]).astype(np.float32)), [1, 1, 4, 4]), name='y') self.get_shape = P.Shape() @ms_function @@ -69,11 +72,11 @@ def test_conv2d_backprop_input(): [ -3, -4, -4, -19, 7, 23] [ -3, -2, 0, -14, 3, 16]]]] """ - expect = np.array([[[[ -5, -4, 5, 12, 0, -8], - [-15, -6, 17, 17, -2, -11], - [-15, -8, 13, 12, 2, -4], - [-13, -6, 8, -14, 5, 20], - [ -3, -4, -4, -19, 7, 23], - [ -3, -2, 0, -14, 3, 16]]]]).astype(np.float32) + expect = np.array([[[[-5, -4, 5, 12, 0, -8], + [-15, -6, 17, 17, -2, -11], + [-15, -8, 13, 12, 2, -4], + [-13, -6, 8, -14, 5, 20], + [-3, -4, -4, -19, 7, 23], + [-3, -2, 0, -14, 3, 16]]]]).astype(np.float32) print(output) assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py b/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py index b0a0ee2589..4e223b2ce0 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py @@ -20,9 +20,11 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") from mindspore import log as logger + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -33,7 +35,7 @@ class Net(nn.Cell): def test_net(): - x = np.random.randn(2,5,8).astype(np.float32) + x = np.random.randn(2, 5, 8).astype(np.float32) mask = np.random.randn(16).astype(np.uint8) keep_prob = 1 @@ -48,4 +50,3 @@ def test_net(): logger.info("***********output y*********") logger.info(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu.py index fb66d94430..346823eaed 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu.py +++ 
b/tests/st/ops/ascend/test_tbe_ops/test_gelu.py @@ -21,6 +21,7 @@ import math import pytest from mindspore import context from mindspore import log as logger + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -52,6 +53,7 @@ def test_gelu_input_dim_0(): with pytest.raises(ValueError): gelu_forward_cmp(input_shape) + def test_gelu_input_dim_10240_1024(): input_shape = [10240, 1024] gelu_forward_cmp(input_shape) @@ -96,6 +98,7 @@ def test_gelu_input_dim_128_4096(): input_shape = [128, 4096] gelu_forward_cmp(input_shape) + @pytest.mark.lower_bs def test_gelu_input_dim_160_1024(): input_shape = [160, 1024] diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py index 93d25cd096..3041d33166 100755 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py @@ -25,6 +25,7 @@ from mindspore import log as logger context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -55,6 +56,7 @@ def gelu_backward_cmp(input_shape): logger.info("---------me--------") logger.info(output_grad_me) + # ---------- LARGE INPUT --------------- class MEGeluLargeIn(Cell): @@ -67,6 +69,7 @@ class MEGeluLargeIn(Cell): x = self.matmul(x1, x2) return self.gelu(x) + class GradLargeIn(Cell): def __init__(self, network): super(GradLargeIn, self).__init__() @@ -86,5 +89,5 @@ def gelu_backward_me_large_in_impl(x1, x2, output_grad): def test_grad_gelu_input_10240_1024(): - input_shape = [10240,1024] + input_shape = [10240, 1024] gelu_backward_cmp(input_shape) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_greater.py b/tests/st/ops/ascend/test_tbe_ops/test_greater.py index b9dae700c2..793b7208f4 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_greater.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_greater.py @@ -20,8 +20,10 @@ from mindspore.common.tensor import Tensor from mindspore.train.model import Model from mindspore import log as logger from mindspore import context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Greater(Cell): def __init__(self): super(Greater, self).__init__() @@ -30,6 +32,7 @@ class Greater(Cell): def construct(self, inputa, inputb): return self.greater(inputa, inputb) + def me_greater(inputa, inputb): net = Greater() net.set_train() @@ -42,10 +45,11 @@ def me_greater(inputa, inputb): logger.info(inputb) return out.asnumpy() + @pytest.mark.ssd_tbe def test_greater_2d_scalar0(): a = np.random.randint(-5, 5, [8, 32]).astype(np.int32) b = np.random.randint(-5, 5, [8, 32]).astype(np.int32) out_me = me_greater(Tensor(a), Tensor(b)) logger.info("Check me result:") - logger.info(out_me) \ No newline at end of file + logger.info(out_me) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py index f3e4e43958..4ef32dee6e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm.py @@ -20,8 +20,10 @@ from mindspore.train.model import Model from mindspore import log as logger import pytest from mindspore import context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Net(Cell): def __init__(self, input_shape, begin_norm_axis, begin_params_axis, gamma, beta): super(Net, self).__init__() @@ -31,6 +33,7 @@ class Net(Cell): x = self.layernorm(input) return x + def 
pt_me_layernorm(input_data, normalized_shape, gamma, beta, axis): net = Net(normalized_shape, begin_norm_axis=axis, begin_params_axis=axis, @@ -42,6 +45,7 @@ def pt_me_layernorm(input_data, normalized_shape, gamma, beta, axis): logger.info("Check me result:") logger.info(out_me.asnumpy()) + @pytest.mark.lower_bs def test_normal_layernorm_1_128_1024_axis_2(): """ @@ -52,4 +56,4 @@ def test_normal_layernorm_1_128_1024_axis_2(): gamma.fill(1.1) beta = np.random.randn(1024).astype(np.float32) beta.fill(0.1) - pt_me_layernorm(input_data, (1024, ), gamma, beta, 2) + pt_me_layernorm(input_data, (1024,), gamma, beta, 2) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py index 5ae09886ce..b9d706e25e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py @@ -19,18 +19,21 @@ from mindspore.nn import Cell from mindspore.ops.composite import GradOperation from mindspore import log as logger from mindspore import context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) self.network = network - def construct(self, input, output_grad,): + def construct(self, input, output_grad): gout = self.grad(self.network)(input, output_grad) return gout + class Net(Cell): def __init__(self, input_shape, begin_norm_axis, begin_params_axis, gamma, beta): super(Net, self).__init__() @@ -40,6 +43,7 @@ class Net(Cell): x = self.layernorm(input) return x + def py_me_layernorm_grad(input_data, normalized_shape, gamma, beta, axis, gradients): input_me = Tensor(input_data) net_me = Grad(Net(normalized_shape, begin_norm_axis=axis, @@ -52,6 +56,7 @@ def py_me_layernorm_grad(input_data, normalized_shape, gamma, beta, axis, gradie logger.info("Check me result:") logger.info(out_grad.asnumpy()) + def test_normal_layernorm_grad_normalize_2d(): """ 1 input[1, 128, 1024],normalized_shape=[1024],element_affine=False diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less.py b/tests/st/ops/ascend/test_tbe_ops/test_less.py index ef65fbe3b2..65926ef6d7 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_less.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_less.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,8 +31,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.less(x1, x2) -x1 = np.random.randn(3,4).astype(np.float16) -x2 = np.random.randn(3,4).astype(np.float16) + +x1 = np.random.randn(3, 4).astype(np.float16) +x2 = np.random.randn(3, 4).astype(np.float16) + def test_net(): less = Net() @@ -37,4 +42,3 @@ def test_net(): print(x1) print(x2) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py index 503c7b7e76..bca00bd678 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net,
self).__init__() @@ -28,8 +31,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.less_equal(x1, x2) -x1 = np.random.randn(3,4).astype(np.float16) -x2 = np.random.randn(3,4).astype(np.float16) + +x1 = np.random.randn(3, 4).astype(np.float16) +x2 = np.random.randn(3, 4).astype(np.float16) + def test_net(): less_equal = Net() @@ -37,4 +42,3 @@ def test_net(): print(x1) print(x2) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py index 1df04b27d4..650de5d3bf 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,12 +31,14 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.logical_and(x1, x2) + x1 = [True, True, False, False, True, True, False, False] x2 = [True, False, False, True, True, False, False, True] + + def test_net(): logical_and = Net() output = logical_and(Tensor(x1), Tensor(x2)) print(x1) print(x2) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py index 5d13a48138..2b78a089c3 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,11 +31,12 @@ class Net(nn.Cell): def construct(self, x1): return self.logical_not(x1) + x1 = [True, True, False, False, True, True, False, False] + def test_net(): logical_not = Net() output = logical_not(Tensor(x1)) print(x1) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py index a2b7841c71..e7dc6a2723 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,12 +31,14 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.logical_or(x1, x2) + x1 = [True, True, False, False, True, True, False, False] x2 = [True, False, False, True, True, False, False, True] + + def test_net(): logical_or = Net() output = logical_or(Tensor(x1), Tensor(x2)) print(x1) print(x2) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py index 92115409f7..8c8b2c380f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py @@ -21,6 +21,7 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -30,8 +31,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return 
self.matmul(x1, x2) -x1 = np.random.randn(1,3).astype(np.float32) -x2 = np.random.randn(3,4).astype(np.float32) + +x1 = np.random.randn(1, 3).astype(np.float32) +x2 = np.random.randn(3, 4).astype(np.float32) + def test_net(): context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py index 4775f73281..8aebb7bd7e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py @@ -20,7 +20,10 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -30,8 +33,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.matmul(x1, x2) -x1 = np.random.randn(10,1).astype(np.float32) -x2 = np.random.randn(100,1).astype(np.float32) + +x1 = np.random.randn(10, 1).astype(np.float32) +x2 = np.random.randn(100, 1).astype(np.float32) + def test_net(): matmul = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum.py index ba66f0c038..b5a868c39a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum.py @@ -22,14 +22,16 @@ from mindspore.ops import operations as P context.set_context(device_target="Ascend") + class Max(nn.Cell): - def __init__(self,dtype): + def __init__(self, dtype): super(Max, self).__init__() self.max = P.Maximum() def construct(self, inputa, inputb): return self.max(inputa, inputb) + def me_max(inputa, inputb, dtype=ms.float32): context.set_context(mode=context.GRAPH_MODE) net = Max(dtype) @@ -44,14 +46,16 @@ def me_max(inputa, inputb, dtype=ms.float32): print(out) return out.asnumpy() -def cmp_max(a,b): + +def cmp_max(a, b): out = np.maximum(a, b) out_ms = me_max(a, b) print("-------ms------") print("numpy out :{}".format(out)) print("ms out :{}".format(out_ms)) + def test_maximum_2_2(): a = np.random.randn(2, 2).astype(np.float32) b = np.random.randn(2, 2).astype(np.float32) - cmp_max(a,b) \ No newline at end of file + cmp_max(a, b) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py index 4b3103cbfe..7c32e01b45 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py @@ -22,6 +22,7 @@ from mindspore.ops import operations as P context.set_context(device_target="Ascend") grad = C.GradOperation('get_all', get_all=True, sens_param=True) + class MaxNetMe(Cell): def __init__(self): super(MaxNetMe, self).__init__() @@ -31,6 +32,7 @@ class MaxNetMe(Cell): x = self.max(inputA, inputB) return x + class GradWrap(Cell): def __init__(self, network): super(GradWrap, self).__init__() @@ -40,6 +42,7 @@ class GradWrap(Cell): gout = grad(self.network)(inputA, inputB, sens) return gout + def gen_data(inputA_np, inputB_np, grad=None): inputA_me = inputA_np if isinstance(inputA_np, np.ndarray) == True: @@ -61,7 +64,8 @@ def gen_data(inputA_np, inputB_np, grad=None): print(output[0].asnumpy()) print(output[1].asnumpy()) + def test_net(): inputA_np = np.random.randn(1, 3, 2, 2).astype(np.float32) inputB_np = np.random.randn(1, 3, 2, 2).astype(np.float32) - gen_data(inputA_np, inputB_np) \ No newline at end of file + 
gen_data(inputA_np, inputB_np) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maxpool.py b/tests/st/ops/ascend/test_tbe_ops/test_maxpool.py index 5baaa32d52..625eb0dc5c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maxpool.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maxpool.py @@ -19,12 +19,12 @@ from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.maxpool = P.MaxPool(padding="SAME", ksize=3, strides=2) - @ms_function def construct(self, x): output = self.maxpool(x) @@ -32,7 +32,7 @@ class Net(nn.Cell): def test_net(): - x = np.random.randn(32,64,112,112).astype(np.float16) + x = np.random.randn(32, 64, 112, 112).astype(np.float16) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") maxpool = Net() output = maxpool(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py index 9651168634..d9436ea2d2 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py @@ -19,6 +19,7 @@ from mindspore.common.api import ms_function import numpy as np import mindspore.context as context from mindspore.ops.composite import GradOperation + context.set_context(device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum.py index a107d46f43..80a9bc8c3f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum.py @@ -22,7 +22,10 @@ from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter import mindspore as ms from mindspore.train.model import Model + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Min(nn.Cell): def __init__(self, dtype): super(Min, self).__init__() @@ -46,7 +49,8 @@ def me_min(inputa, inputb, dtype=ms.float32): print(out) return out.asnumpy() -def cmp_min(a,b): + +def cmp_min(a, b): print(a) print(b) @@ -55,8 +59,8 @@ def cmp_min(a,b): out_me = me_min(a, b) print(out_me) + def test_minimum_2_2(): a = np.random.randn(2, 2, 1, 1).astype(np.float32) b = np.random.randn(2, 2, 1, 1).astype(np.float32) - cmp_min(a,b) - + cmp_min(a, b) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py index cbf870bb21..fc22e2a859 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py @@ -22,6 +22,8 @@ from mindspore.ops.operations import Minimum context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") grad = C.GradOperation('get_all', get_all=True, sens_param=True) + + class MinNetMe(Cell): def __init__(self): super(MinNetMe, self).__init__() @@ -41,6 +43,7 @@ class GradWrap(Cell): gout = grad(self.network)(inputA, inputB, sens) return gout + def gen_data(inputA_np, inputB_np, grad=None): inputA_me = inputA_np if isinstance(inputA_np, np.ndarray) == True: @@ -51,7 +54,7 @@ def gen_data(inputA_np, inputB_np, grad=None): inputB_me = Tensor(inputB_np) if grad is None: - grad = np.random.randn(1, 3, 2, 2).astype(np.float32) + grad = np.random.randn(1, 3, 2, 2).astype(np.float32) print(inputA_np) print(inputB_np) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_mul.py b/tests/st/ops/ascend/test_tbe_ops/test_mul.py index dbc8c77e11..d8030ffdc0 100644 --- 
a/tests/st/ops/ascend/test_tbe_ops/test_mul.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_mul.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,8 +31,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.mul(x1, x2) -x1 = np.random.randn(3,4).astype(np.float32) -x2 = np.random.randn(3,4).astype(np.float32) + +x1 = np.random.randn(3, 4).astype(np.float32) +x2 = np.random.randn(3, 4).astype(np.float32) + def test_net(): mul = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py b/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py index efa6a03288..2de0015fe5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,8 +31,8 @@ class Net(nn.Cell): def construct(self): return self.npu_alloc_float_status() + def test_net(): npu_alloc_float_status = Net() output = npu_alloc_float_status() print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py b/tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py index 0c6072fd6f..5f92b2b7ed 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,11 +31,12 @@ class Net(nn.Cell): def construct(self, x1): return self.npu_clear_float_status(x1) + x1 = np.random.randn(8).astype(np.float32) + def test_net(): npu_clear_float_status = Net() output = npu_clear_float_status(Tensor(x1)) print(x1) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py b/tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py index 403f815c26..9e539eae86 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,11 +31,12 @@ class Net(nn.Cell): def construct(self, x1): return self.npu_get_float_status(x1) + x1 = np.random.randn(8).astype(np.float32) + def test_net(): npu_get_float_status = Net() output = npu_get_float_status(Tensor(x1)) print(x1) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_pad.py b/tests/st/ops/ascend/test_tbe_ops/test_pad.py index 5590ca597a..727939b7cd 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_pad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_pad.py @@ -18,21 +18,24 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + 
context.set_context(device_target="Ascend") class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.pad = P.Pad(paddings=((3,2), (2,3))) + self.pad = P.Pad(paddings=((3, 2), (2, 3))) @ms_function def construct(self, x): x = self.pad(x) return x + x = np.random.random(size=(2, 2)).astype(np.float32) + def test_net(): pad = Net() output = pad(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_pow.py b/tests/st/ops/ascend/test_tbe_ops/test_pow.py index 1bfcf1f63b..a0c3a94af4 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_pow.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_pow.py @@ -23,8 +23,10 @@ from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter import mindspore as ms from mindspore.train.model import Model + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class PowMe(Cell): def __init__(self): super(PowMe, self).__init__() @@ -33,6 +35,7 @@ class PowMe(Cell): def construct(self, input, exp): return self.pow(input, exp) + def pow_forward_me_impl(input, exp): n = PowMe() n.set_train() @@ -40,6 +43,7 @@ def pow_forward_me_impl(input, exp): out = m.predict(input, exp) return out.asnumpy() + def pow_forward_cmp(input_shape, exp_shape): if len(input_shape) == 0: input_np = np.absolute(np.random.randn()) @@ -54,14 +58,14 @@ def pow_forward_cmp(input_shape, exp_shape): exp_np = np.absolute(np.random.randn(*exp_shape).astype(np.float32)) exp_tf = exp_np exp_me = Tensor(exp_np, dtype=ms.float32) - + out_me = pow_forward_me_impl(input_me, exp_me) print(input_me) print(exp_me) print(out_me) - + + def test_pow_input_scalar_exp_scalar(): input_shape = [] exp_shape = [] pow_forward_cmp(input_shape, exp_shape) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_realdiv.py b/tests/st/ops/ascend/test_tbe_ops/test_realdiv.py index ea72c7a78b..00e4ba0911 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_realdiv.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_realdiv.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,8 +31,10 @@ class Net(nn.Cell): def construct(self, x1, x2): return self.realdiv(x1, x2) -x1 = np.random.randn(3,4).astype(np.float32) -x2 = np.random.randn(3,4).astype(np.float32) + +x1 = np.random.randn(3, 4).astype(np.float32) +x2 = np.random.randn(3, 4).astype(np.float32) + def test_net(): realdiv = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py b/tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py index f90a2ae9e9..278adc1dc2 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py @@ -18,7 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -28,11 +31,12 @@ class Net(nn.Cell): def construct(self, x1): return self.reciprocal(x1) + x1 = np.random.randn(3, 4).astype(np.float32) + def test_net(): reciprocal = Net() output = reciprocal(Tensor(x1)) print(x1) print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu.py b/tests/st/ops/ascend/test_tbe_ops/test_relu.py index dd29a4bea0..cc07c5d58a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu.py +++ 
b/tests/st/ops/ascend/test_tbe_ops/test_relu.py @@ -20,7 +20,10 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -30,8 +33,9 @@ class Net(nn.Cell): def construct(self, x): return self.relu(x) + def test_net(): - x = np.random.randn(2,3,3,4).astype(np.float32) + x = np.random.randn(2, 3, 3, 4).astype(np.float32) relu = Net() output = relu(Tensor(x)) print(x) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py index 012991d208..aef8aeb214 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py @@ -21,8 +21,10 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops.composite import GradOperation + context.set_context(device_target="Ascend") + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -33,6 +35,7 @@ class Grad(nn.Cell): def construct(self, input, output_grad): return self.grad(self.network)(input, output_grad) + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -41,9 +44,10 @@ class Net(nn.Cell): def construct(self, x): return self.relu(x) + def test_net(): - x = np.random.randn(2,3,3,4).astype(np.float32) - sens = np.random.randn(2,3,3,4).astype(np.float32) + x = np.random.randn(2, 3, 3, 4).astype(np.float32) + sens = np.random.randn(2, 3, 3, 4).astype(np.float32) net = Grad(Net()) output = net(Tensor(x), Tensor(sens)) print(len(output)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py index 28bf566c2d..ae736a5c4a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py @@ -21,8 +21,10 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops.composite import GradOperation + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() @@ -33,6 +35,7 @@ class Grad(nn.Cell): def construct(self, input): return self.grad(self.network)(input) + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -41,8 +44,9 @@ class Net(nn.Cell): def construct(self, x): return self.relu_v2(x) + def test_net(): - x = Tensor(np.ones((2,3,3,4)).astype(np.float32)) + x = Tensor(np.ones((2, 3, 3, 4)).astype(np.float32)) relu_net = Net() relu_output = relu_net(x) net = Grad(Net()) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py index 8fa67f9c47..62dc123b48 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py @@ -18,8 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -29,6 +31,7 @@ class Net(nn.Cell): def construct(self, x): return self.upsample(x) + def test_net(): 
x = np.random.random(size=(32, 3, 32, 32)).astype(np.float32) upsample = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py index 16714a00c3..115989e272 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py @@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") @@ -49,4 +50,4 @@ def test_net(): grad = Grad(Net()) output = grad(Tensor(image), Tensor(grads)) print("=================output====================") - print(output) \ No newline at end of file + print(output) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py b/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py index a12a863902..e518e14d76 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py @@ -20,6 +20,7 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -29,12 +30,13 @@ class Net(nn.Cell): self.scatternd = P.ScatterNd() def construct(self, indices, update): - return self.scatternd(indices, update, (3,3)) + return self.scatternd(indices, update, (3, 3)) indices = np.array([[0, 1], [1, 1]]).astype(np.int32) update = np.array([3.2, 1.1]).astype(np.float32) + def test_net(): scatternd = Net() print(indices) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_select.py b/tests/st/ops/ascend/test_tbe_ops/test_select.py index 1734e6aa82..b409b91b88 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_select.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_select.py @@ -23,7 +23,10 @@ from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter import mindspore as ms from mindspore.train.model import Model + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Select(Cell): def __init__(self, dtype): super(Select, self).__init__() @@ -32,6 +35,7 @@ class Select(Cell): def construct(self, cond, inputa, inputb): return self.select(cond, inputa, inputb) + def me_select(cond, inputa, inputb, dtype=ms.float32): net = Select(dtype) net.set_train() @@ -45,9 +49,10 @@ def me_select(cond, inputa, inputb, dtype=ms.float32): out = model.predict(Tensor(cond), inputa, inputb) return out.asnumpy() - -def cmp_select(input_cond,inputa,inputb): - cond = input_cond > 0.5 + + +def cmp_select(input_cond, inputa, inputb): + cond = input_cond > 0.5 out_me = me_select(cond, inputa, inputb) print(input_cond) print(cond) @@ -55,9 +60,9 @@ def cmp_select(input_cond,inputa,inputb): print(inputb) print(out_me) + def test_select_2_2(): input_cond = np.random.rand(2, 2) - inputa = np.random.randn(2,2).astype(np.float32) - inputb = np.random.randn(2,2).astype(np.float32) - cmp_select(input_cond,inputa,inputb) - + inputa = np.random.randn(2, 2).astype(np.float32) + inputb = np.random.randn(2, 2).astype(np.float32) + cmp_select(input_cond, inputa, inputb) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py index f1610f01ab..489b56a98c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py +++ 
b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py @@ -18,8 +18,10 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -29,6 +31,7 @@ class Net(nn.Cell): def construct(self, x): return self.sigmoid(x) + def test_net(): x = np.random.random(size=(2, 3)).astype(np.float32) sigmoid = Net() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py index 2cac3bba16..8014c687fc 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py @@ -21,6 +21,7 @@ import mindspore.context as context context.set_context(device_target="Ascend") + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py index dbcfe059ac..4018c3c080 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py @@ -22,6 +22,7 @@ import mindspore.context as context context.set_context(device_target="Ascend") + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py index 189e75c4f9..7813f7f846 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py @@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") @@ -42,6 +43,7 @@ class Grad(nn.Cell): def construct(self, x, y): return self.grad(self.network)(x, y) + def test_net(): x = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32) y = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32) @@ -49,4 +51,3 @@ def test_net(): output = net(Tensor(x), Tensor(y)) print("=================output====================") print(output.asnumpy()) - diff --git a/tests/st/ops/ascend/test_tbe_ops/test_slice.py b/tests/st/ops/ascend/test_tbe_ops/test_slice.py index 4b55731e18..945da861b2 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_slice.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_slice.py @@ -20,26 +20,28 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Slice(nn.Cell): - def __init__( self): + def __init__(self): super(Slice, self).__init__() self.cat = P.Slice() self.x1 = Parameter(initializer( - Tensor(np.array([[[1, -1, 1], [2, -2, 2]], [[3, -3, 3], [4, -4, 4]], [[5, -5, 5], [6, -6, 6]]]).astype(np.float32)), [3,2,3]), name='x1') + Tensor(np.array([[[1, -1, 1], [2, -2, 2]], [[3, -3, 3], [4, -4, 4]], [[5, -5, 5], [6, -6, 6]]]).astype( + np.float32)), [3, 2, 3]), name='x1') @ms_function def construct(self): - return self.cat(self.x1, (0,1, 0), (2, 1, 3)) + return self.cat(self.x1, (0, 1, 0), (2, 1, 3)) def test_slice(): cat = Slice() 
output = cat() - expect = [[[2., -2., 2.]], - [[4., -4., 4.]]] + expect = [[[2., -2., 2.]], + [[4., -4., 4.]]] print(output) - assert (output.asnumpy() == expect).all() \ No newline at end of file + assert (output.asnumpy() == expect).all() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py index cc0c0e0fc2..1a751038dd 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py @@ -18,6 +18,7 @@ import mindspore.nn as nn import mindspore.context as context from mindspore import Tensor from mindspore.ops import operations as P + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") diff --git a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py index 1ab9d998a1..bf4a5d89fe 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py @@ -31,6 +31,7 @@ class Net(nn.Cell): def construct(self, pred, gt): return self.SmoothL1Loss(pred, gt) + class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() diff --git a/tests/st/ops/ascend/test_tbe_ops/test_softmax.py b/tests/st/ops/ascend/test_tbe_ops/test_softmax.py index 741e0ba699..308e7d85ed 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_softmax.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_softmax.py @@ -20,17 +20,22 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.Softmax = P.Softmax() - + def construct(self, x): return self.Softmax(x) + x = np.array([[5, 1]]).astype(np.float32) + def test_net(): softmax = Net() output = softmax(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py b/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py index f21355533d..32a6605426 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py @@ -18,6 +18,7 @@ import mindspore.nn as nn from mindspore.common.api import ms_function import numpy as np import mindspore.context as context + context.set_context(device_target="Ascend") @@ -36,4 +37,4 @@ def test_net(): labels = np.random.randn(32, 1001).astype(np.float16) SoftmaxCrossEntropyWithLogits = Net() output = SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels)) - #print(output.asnumpy()) + # print(output.asnumpy()) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_split.py b/tests/st/ops/ascend/test_tbe_ops/test_split.py index 0e75643dea..a31c126147 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_split.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_split.py @@ -20,7 +20,10 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -29,7 +32,8 @@ class Net(nn.Cell): def construct(self, x): return self.split(x) -x = np.random.randn(2,4).astype(np.float32) + +x = np.random.randn(2, 
4).astype(np.float32) def test_net(): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py b/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py index cd7176a60c..37d5b6962a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sqrt.py @@ -20,17 +20,22 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.sqrt = P.Sqrt() - + def construct(self, x): return self.sqrt(x) + x = np.array([1.0, 4.0, 9.0]).astype(np.float32) + def test_net(): sqrt = Net() output = sqrt(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_square.py b/tests/st/ops/ascend/test_tbe_ops/test_square.py index 81cad39bfa..3a9b10c9be 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_square.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_square.py @@ -20,17 +20,22 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.square = P.Square() - + def construct(self, x): return self.square(x) + x = np.array([1.0, 4.0, 9.0]).astype(np.float32) + def test_net(): square = Net() output = square(Tensor(x)) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py index 65f4f75538..1195d1352c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py @@ -19,7 +19,10 @@ from mindspore.nn import Cell from mindspore.train.model import Model import pytest import mindspore.context as context + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + class Net(Cell): def __init__(self, begin, end, stride): super(Net, self).__init__() @@ -32,6 +35,7 @@ class Net(Cell): x = self.stridedslice(input, self.begin, self.end, self.stride) return x + def me_stridedslice(input1, begin, end, stride): input_me = Tensor(input1) net = Net(begin, end, stride) @@ -40,17 +44,19 @@ def me_stridedslice(input1, begin, end, stride): output = model.predict(input_me) print(output.asnumpy()) + def test_stridedslice_input_2d(): input = np.random.randn(5, 5).astype(np.int32) - begin = (0,0) - end = (2,2) - stride = (1,1) + begin = (0, 0) + end = (2, 2) + stride = (1, 1) me_stridedslice(input, begin, end, stride) + def test_stridedslice_input_3d(): input = np.random.randn(5, 5, 5).astype(np.float32) - begin = (0,0,0) - end = (3,3,3) - stride = (1,1,1) + begin = (0, 0, 0) + end = (3, 3, 3) + stride = (1, 1, 1) me_stridedslice(input, begin, end, stride) diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py index d5ee390c9b..4bf4bee31a 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py @@ -19,8 +19,10 @@ from mindspore.nn import Cell from mindspore.ops.composite import GradOperation from mindspore import context import pytest + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() @@ -31,6 +33,7 @@ class Grad(Cell): 
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py
index d5ee390c9b..4bf4bee31a 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py
@@ -19,8 +19,10 @@ from mindspore.nn import Cell
 from mindspore.ops.composite import GradOperation
 from mindspore import context
 import pytest
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
+
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
@@ -31,6 +33,7 @@ class Grad(Cell):
         gout = self.grad(self.network)(input, output_grad)
         return gout
 
+
 class Net(Cell):
     def __init__(self, begin, end, stride):
         super(Net, self).__init__()
@@ -43,6 +46,7 @@ class Net(Cell):
         x = self.stridedslice(input, self.begin, self.end, self.stride)
         return x
 
+
 def me_stridedslice(input, begin, end, stride, gradients):
     input_me = Tensor(input)
     out_grad_me = Tensor(gradients)
@@ -51,6 +55,7 @@ def me_stridedslice(input, begin, end, stride, gradients):
     out_grad = net_me(input_me, out_grad_me)
     print(out_grad.asnumpy())
 
+
 def test_grad_stridedslice_1d():
     input = np.random.randn(2).astype(np.float32)
     begin = (0,)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sub.py b/tests/st/ops/ascend/test_tbe_ops/test_sub.py
index 9f035f773d..ae2fede212 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_sub.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_sub.py
@@ -20,17 +20,21 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
         self.sub = P.Sub()
-
+
     def construct(self, x, y):
         return self.sub(x, y)
 
-x = np.random.randn(1,3,3,4).astype(np.float32)
-y = np.random.randn(1,3,3,4).astype(np.float32)
+
+x = np.random.randn(1, 3, 3, 4).astype(np.float32)
+y = np.random.randn(1, 3, 3, 4).astype(np.float32)
 
 
 def test_net():
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_tanh.py b/tests/st/ops/ascend/test_tbe_ops/test_tanh.py
index 6c2b0b7fef..e889401378 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_tanh.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_tanh.py
@@ -21,6 +21,7 @@ from mindspore.ops import operations as P
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -29,9 +30,12 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.tanh(x)
 
+
 input_shape = [1]
 input_np = np.random.randn(*input_shape).astype(np.float32)
 input_me = Tensor(input_np)
+
+
 def test_net():
     context.set_context(mode=context.GRAPH_MODE)
     tanh = Net()
@@ -40,4 +44,4 @@ def test_net():
     out = m.predict(input_me)
     print("out_me.dtype={}".format(out.dtype))
     print("out_me.asnumpy={}".format(out.asnumpy()))
-    return out.asnumpy()
\ No newline at end of file
+    return out.asnumpy()
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py
index 683d1b9d30..4dcf79e647 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py
@@ -22,6 +22,7 @@ from mindspore.ops.operations import _grad_ops as G
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
@@ -30,9 +31,12 @@ class Net(nn.Cell):
     def construct(self, y, dy):
         return self.tanh_grad(y, dy)
 
+
 input_shape = [1]
 input_np = np.random.randn(*input_shape).astype(np.float32)
 input_me = Tensor(input_np)
+
+
 def test_net():
     context.set_context(mode=context.GRAPH_MODE)
     tanh_grad = Net()
@@ -41,4 +45,4 @@ def test_net():
     out = m.predict(input_me, input_me)
     print("out_me.dtype={}".format(out.dtype))
     print("out_me.asnumpy={}".format(out.asnumpy()))
-    return out.asnumpy()
\ No newline at end of file
+    return out.asnumpy()
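
As a host-side cross-check for the TanhGrad kernel above: for y = tanh(x), the analytic gradient is dy * (1 - y**2). A minimal numpy sketch, assuming the same (y, dy) argument order as the construct method in the test:

import numpy as np

def tanh_grad_reference(y, dy):
    # d/dx tanh(x) = 1 - tanh(x)**2, evaluated at y = tanh(x).
    return dy * (1.0 - y ** 2)

x = np.random.randn(1).astype(np.float32)
y = np.tanh(x)
print(tanh_grad_reference(y, y))  # mirrors the m.predict(input_me, input_me) pairing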
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_tile.py b/tests/st/ops/ascend/test_tbe_ops/test_tile.py
index 7594248d53..b613fb8fa5 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_tile.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_tile.py
@@ -20,6 +20,7 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_topk.py b/tests/st/ops/ascend/test_tbe_ops/test_topk.py
index 275ef50038..4ca22b211c 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_topk.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_topk.py
@@ -20,7 +20,10 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
+
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+
 class Net(nn.Cell):
     def __init__(self, k):
         super(Net, self).__init__()
@@ -32,7 +35,7 @@ class Net(nn.Cell):
 
 
 def test_net():
-    x = np.random.randn(4,4).astype(np.float16)
+    x = np.random.randn(4, 4).astype(np.float16)
     k = 2
     TopK = Net(k)
     output = TopK(Tensor(x))
@@ -41,4 +44,3 @@ def test_net():
     print("***********output y*********")
     print(output[0].asnumpy())
-
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py b/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py
index faa3b7d559..14bed5fbf0 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py
@@ -21,6 +21,7 @@ from mindspore.ops import operations as P
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self, perm_in):
         super(Net, self).__init__()
@@ -31,6 +32,7 @@ class Net(nn.Cell):
         x = self.transpose(input, self.perm)
         return x
 
+
 def ms_transpose(input, perm_in):
     context.set_context(mode=context.GRAPH_MODE)
     input_me = Tensor(input)
@@ -42,7 +44,8 @@ def ms_transpose(input, perm_in):
     print(output.asnumpy().dtype)
     print(output.asnumpy())
 
+
 def test_net():
     input = np.random.randn(8, 24, 1, 1).astype(np.float16)
     perm = (0, 2, 3, 1)
-    ms_transpose(input, perm)
\ No newline at end of file
+    ms_transpose(input, perm)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py b/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py
index 89f6029519..9a98f94bc5 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py
@@ -21,6 +21,7 @@ from mindspore.ops import operations as P
 
 context.set_context(device_target="Ascend")
 
+
 class Net(nn.Cell):
     def __init__(self, num_segments):
         super(Net, self).__init__()
@@ -30,6 +31,7 @@ class Net(nn.Cell):
     def construct(self, x, segment_ids):
         return self.seg_sum(x, segment_ids, self.num_segments)
 
+
 def me_un_seg_sum(input, indices, num_segments):
     context.set_context(mode=context.GRAPH_MODE)
     net = Net(num_segments)
@@ -38,6 +40,7 @@ def me_un_seg_sum(input, indices, num_segments):
     out = model.predict(Tensor(input), Tensor(indices))
     return out.asnumpy()
 
+
 def comapre_un_seg_sum(shape, indices, num_segments, dtype):
     input = np.random.randn(*shape).astype(dtype)
     indices_me = np.array(indices).astype(np.int32)
@@ -45,6 +48,7 @@ def comapre_un_seg_sum(shape, indices, num_segments, dtype):
     print("-------------ms------------------")
     print(out_me)
 
+
 def test_net():
     indices = np.random.randint(0, 1280, 1280)
-    comapre_un_seg_sum([1280, 768], indices, 8192, np.float32)
\ No newline at end of file
+    comapre_un_seg_sum([1280, 768], indices, 8192, np.float32)
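
The unsorted-segment-sum test above only prints the device output; a numpy reference of the operator's semantics (rows of x accumulated into the output row named by each segment id) is short enough to serve as an oracle. A sketch, assuming ids lie in [0, num_segments):

import numpy as np

def unsorted_segment_sum_reference(x, segment_ids, num_segments):
    # Accumulate each row of x into out[segment_ids[i]]; np.add.at handles repeats.
    out = np.zeros((num_segments,) + x.shape[1:], dtype=x.dtype)
    np.add.at(out, segment_ids, x)
    return out

x = np.random.randn(1280, 768).astype(np.float32)
ids = np.random.randint(0, 1280, 1280)
print(unsorted_segment_sum_reference(x, ids, 8192).shape)  # (8192, 768)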
diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py
index 89f6f212d0..fb42a28a2f 100644
--- a/tests/st/ops/ascend/test_tdt_data_ms.py
+++ b/tests/st/ops/ascend/test_tdt_data_ms.py
@@ -95,7 +95,7 @@ if __name__ == '__main__':
     net.set_train()
 
     _executor.init_dataset(ds1.queue_name, 39, batch_size,
-                     dataset_types, dataset_shapes, (), 'dataset')
+                           dataset_types, dataset_shapes, (), 'dataset')
     ds1.send()
 
     for data in data_set.create_tuple_iterator():
@@ -113,4 +113,3 @@
         (data[0] == d).all()), "TDT test execute failed, please check current code commit"
     print(
         "+++++++++++++++++++++++++++++++++++[INFO] Success+++++++++++++++++++++++++++++++++++++++++++")
-
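
Most of the tests touched by this patch print results rather than assert on them. Where a closed-form answer exists, the same structure could assert instead; a hedged numpy-only sketch of that pattern for the Sqrt case (here 'actual' stands in for output.asnumpy() from the Net on device):

import numpy as np

x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
expected = np.sqrt(x)        # [1., 2., 3.]
actual = np.sqrt(x)          # stand-in for the device output
np.testing.assert_allclose(actual, expected, rtol=1e-3)
print("sqrt check passed")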