!1563 Fix some minor Pylint issues in the ops code

Merge pull request !1563 from liuwenhao/master
pull/1563/MERGE
mindspore-ci-bot committed 5 years ago (via Gitee)
commit d3dbb10b6d

@@ -164,9 +164,10 @@ def CusBatchMatMul(input_x1, input_x2, output, transpose_a=False, transpose_b=Tr
     matmul_hybrid_f_t_local_UB = tik_instance.Tensor(dtype, [64],
                                                      name="matmul_hybrid_f_t_local_UB",
                                                      scope=tik.scope_ubuf)
-    matmul_hybrid_f_t_local_UB_dst_tmp = tik_instance.Tensor(dtype, [64],
-                                                             name="matmul_hybrid_f_t_local_UB_dst_tmp",
-                                                             scope=tik.scope_ubuf)
+    matmul_hybrid_f_t_local_UB_dst_tmp = tik_instance.Tensor(
+        dtype, [64],
+        name="matmul_hybrid_f_t_local_UB_dst_tmp",
+        scope=tik.scope_ubuf)
     tik_instance.vector_dup(64, matmul_hybrid_f_t_local_UB, 0, 1, 1, 8)
     tik_instance.data_move(input_2_local_UB,
                            input2[(block_idx // 6) * 16384 + thread_idx2 * 8192], 0, 1,
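The first hunk only re-wraps a long tik_instance.Tensor(...) call so every continuation line stays inside Pylint's line-length limit (C0301, line-too-long; the threshold is configurable via max-line-length). A minimal sketch of the same re-wrap, with a hypothetical alloc_tensor stand-in since the real TIK API only exists on Ascend toolchains:

# Stand-in for tik_instance.Tensor, used only to make this sketch runnable.
def alloc_tensor(dtype, shape, name=None, scope=None):
    return {"dtype": dtype, "shape": shape, "name": name, "scope": scope}

# Pushing every argument onto continuation lines shortens each line
# without changing the call's meaning.
buf = alloc_tensor(
    "float16", [64],
    name="matmul_hybrid_f_t_local_UB_dst_tmp",
    scope="ubuf")
print(buf["name"])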

@@ -127,7 +127,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)
-    if len(shape_bias) != 0:
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
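The len(shape_bias) != 0 → if shape_bias: changes address Pylint C1801 (len-as-condition): an empty Python sequence is already falsy, so truth-testing a list or tuple agrees with comparing its length to zero. A quick check:

for shape_bias in ([], [16], (), (1,)):
    # For plain sequences, truthiness is equivalent to len() != 0.
    assert bool(shape_bias) == (len(shape_bias) != 0)

The rewrite is safe here because shape_bias is a plain list or tuple; a NumPy array, by contrast, raises on an ambiguous truth test.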
@@ -189,7 +189,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
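Collapsing the chained or of equality tests into one membership test is Pylint R1714 (consider-using-in); the two forms are equivalent for these string comparisons:

for src_dtype in ("float32", "int32", "float16"):
    assert (src_dtype == "float32" or src_dtype == "int32") == \
        (src_dtype in ("float32", "int32"))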
@@ -239,6 +239,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False
     except RuntimeError as e:
+        print(e)
         return False
     return True
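The added print(e) gives the bound exception name a use; without it, Pylint reports the "as e" binding as an unused variable (W0612). A minimal sketch of the pattern (probe is a hypothetical function, not from the PR):

def probe():
    try:
        raise RuntimeError("unsupported shape")
    except RuntimeError as e:
        print(e)  # using `e` silences the unused-variable warning
        return False
    return True

print(probe())  # prints "unsupported shape", then False

The alternative would be to drop the binding entirely with a bare "except RuntimeError:"; this PR chooses to log the message instead.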
@@ -385,7 +386,7 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
     tensor_b = tvm.placeholder(shape_b_temp, name='tensor_b',
                                dtype=src_dtype)
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_bias = tvm.placeholder(shape_bias, name='tensor_bias',
                                       dtype=dst_dtype)
@@ -449,20 +450,20 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
                               resMatmul_local_UB, 0, 16, 224 // 2, 0, 56 * 16 * 2 // 2)
         tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x1, input_x2], outputs=[resMatmul])
         return tik_instance
-    else:
-        print("come into tbe, shape is error!")
-        result = te.lang.cce.matmul(tensor_a, tensor_b, trans_a, trans_b, format_a=format_a,
-                                    format_b=format_b, dst_dtype=dst_dtype, tensor_bias=tensor_bias)
-        with tvm.target.cce():
-            schedule = generic.auto_schedule(result)
+    print("come into tbe, shape is error!")
+    result = te.lang.cce.matmul(tensor_a, tensor_b, trans_a, trans_b, format_a=format_a,
+                                format_b=format_b, dst_dtype=dst_dtype, tensor_bias=tensor_bias)
+    with tvm.target.cce():
+        schedule = generic.auto_schedule(result)
     tensor_list = [tensor_a, tensor_b, result]
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_list = [tensor_a, tensor_b, tensor_bias, result]
     config = {"print_ir": False,
               "name": kernel_name,
               "tensor_list": tensor_list}
     te.lang.cce.cce_build_code(schedule, config)
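Dropping the else: after a branch that ends in return is Pylint R1705 (no-else-return): once the TIK path has returned, the TBE fallback can sit at function level with one less indent. The shape of the refactor, sketched with hypothetical kernel builders:

def build_kernel(shape_is_supported):
    if shape_is_supported:
        return "tik kernel"
    # No `else` needed: control only reaches here when the branch
    # above did not return (R1705, no-else-return).
    return "tbe fallback kernel"

assert build_kernel(True) == "tik kernel"
assert build_kernel(False) == "tbe fallback kernel"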

@@ -124,7 +124,7 @@ src_dtype: str
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)
-    if len(shape_bias):
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
@@ -144,11 +144,10 @@ def _get_bias(shape_bias):
     bias_length = shape_bias[0]
     if bias_length % 16 == 0:
         return shape_bias
-    else:
-        bias_length = (bias_length // 16) * 16 + 16
-        shape_bias = []
-        shape_bias.append(bias_length)
-        return shape_bias
+    bias_length = (bias_length // 16) * 16 + 16
+    shape_bias = []
+    shape_bias.append(bias_length)
+    return shape_bias


 def _get_input_shape(shape_x):
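The else removal in _get_bias is the same R1705 cleanup as above; the function itself pads a bias length up to the next multiple of 16, the block size these kernels assume. Its arithmetic, checked in isolation:

def pad_bias_length(bias_length):
    # Mirrors _get_bias: exact multiples of 16 are returned unchanged,
    # anything else is rounded up to the next multiple of 16.
    if bias_length % 16 == 0:
        return bias_length
    return (bias_length // 16) * 16 + 16

assert pad_bias_length(16) == 16
assert pad_bias_length(17) == 32
assert pad_bias_length(31) == 32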
@@ -184,7 +183,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
@@ -234,6 +233,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False
     except RuntimeError as e:
+        print(e)
         return False
     return True

@@ -80,8 +80,8 @@ def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y
                  ((32, 128, 16, 16), 'float16', (32, 32, 16, 16), 'float16', (1,), 'float32'),
                  ((64, 32, 16, 16), 'float16', (64, 64, 16, 16), 'float16', (1,), 'float32'),
                  ((16, 64, 16, 16), 'float16', (16, 16, 16, 16), 'float16', (1,), 'float32')]
-    input_shape = (
-        tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape), input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
+    input_shape = (tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape),
+                   input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
     if input_shape not in Supported:
         raise RuntimeError("input_shape %s is not supported" % str(input_shape))

@@ -129,7 +129,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)
-    if len(shape_bias):
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
@@ -149,11 +149,10 @@ def _get_bias(shape_bias):
     bias_length = shape_bias[0]
     if bias_length % 16 == 0:
         return shape_bias
-    else:
-        bias_length = (bias_length // 16) * 16 + 16
-        shape_bias = []
-        shape_bias.append(bias_length)
-        return shape_bias
+    bias_length = (bias_length // 16) * 16 + 16
+    shape_bias = []
+    shape_bias.append(bias_length)
+    return shape_bias


 def _get_input_shape(shape_x):
@@ -189,7 +188,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
@@ -239,6 +238,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False
     except RuntimeError as e:
+        print(e)
         return False
     return True
@@ -314,7 +314,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra
     src_dtype = input_x1.get("dtype").lower()
     dst_dtype = output_y.get("dtype").lower()
-    if src_dtype == "float32" or src_dtype == "int32":
+    if src_dtype in ("float32", "int32"):
         matmul_vector_cce(shape_a, shape_b, src_dtype, trans_a, trans_b, shape_bias, kernel_name)
         return
     _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b)
@@ -377,7 +377,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra
     tensor_b = tvm.placeholder(shape_b_temp, name='tensor_b',
                                dtype=src_dtype)
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_bias = tvm.placeholder(shape_bias, name='tensor_bias',
                                       dtype=dst_dtype)
     result = te.lang.cce.matmul(tensor_a, tensor_b, trans_a, trans_b, format_a=format_a,
@@ -387,7 +387,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra
         schedule = generic.auto_schedule(result)
     tensor_list = [tensor_a, tensor_b, result]
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_list = [tensor_a, tensor_b, tensor_bias, result]
     config = {"print_ir": False,

@@ -16,17 +16,10 @@
 import functools
 import numpy as np
 import mindspore.nn as nn
-import mindspore.context as context
 import mindspore.common.dtype as mstype
-from mindspore import Tensor, Parameter
-from mindspore.common.initializer import initializer
-from mindspore.ops import Primitive
-from mindspore.ops import composite as C
+from mindspore import Tensor
 from mindspore.ops import operations as P
-from mindspore.ops import functional as F
-from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops.primitive import constexpr
 from mindspore import context

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
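This hunk deletes imports the test never uses (Pylint W0611, unused-import) and keeps a single context import. Unused imports can be listed with Pylint alone; a sketch of running it programmatically, assuming a recent Pylint is installed and with test_cast.py as a placeholder filename:

# W0611 is Pylint's unused-import message ID; everything else is disabled.
from pylint.lint import Run

Run(["--disable=all", "--enable=W0611", "test_cast.py"], exit=False)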
@@ -38,7 +31,7 @@ def test_cast_op_attr():
             self.cast = P.Cast()

         def construct(self, x, t):
             return self.cast(x, t)

     class CastTypeTest(nn.Cell):
         def __init__(self, net):
             super(CastTypeTest, self).__init__()
@@ -54,9 +47,9 @@ def test_cast_op_attr():
         t5 = cast_net(z, mstype.float16)
         return (t1, t2, t3, t4, t5)
     net = CastTypeTest(CastNet())
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.int32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1918]).astype(np.int32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
     out = net(t1, t2, t3)
     assert out[0].asnumpy().dtype == np.float32
     assert out[1].asnumpy().dtype == np.int32
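The [1,16,1,1918] → [1, 16, 1, 1918] edits, repeated throughout the test files below, only add the space after each comma that Pylint's whitespace check demanded (C0326, bad-whitespace, in the Pylint releases of that era). The change is purely cosmetic:

import numpy as np

assert (np.ones([1,16,1,1918]) == np.ones([1, 16, 1, 1918])).all()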

@@ -33,13 +33,13 @@ class Net(nn.Cell):
         return self.mul(x1, x2)

-x1 = np.random.randn(3, 4).astype(np.float32)
-x2 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x2 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     mul = Net()
-    output = mul(Tensor(x1), Tensor(x2))
-    print(x1)
-    print(x2)
+    output = mul(Tensor(arr_x1), Tensor(arr_x2))
+    print(arr_x1)
+    print(arr_x2)
     print(output.asnumpy())
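The x1/x2 → arr_x1/arr_x2 renames, repeated in the test files below, give the module-level inputs names that no longer collide with the x1, x2 parameters of construct; that collision is what Pylint flags (presumably W0621, redefined-outer-name; the PR does not say which message fired). The pattern in miniature:

import numpy as np

# Module-level data gets an `arr_` prefix...
arr_x1 = np.random.randn(3, 4).astype(np.float32)

def construct(x1):  # ...so this parameter no longer shadows it
    return x1 * 2

print(construct(arr_x1))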

@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.npu_clear_float_status(x1)

-x1 = np.random.randn(8).astype(np.float32)
+arr_x1 = np.random.randn(8).astype(np.float32)


 def test_net():
     npu_clear_float_status = Net()
-    output = npu_clear_float_status(Tensor(x1))
-    print(x1)
+    output = npu_clear_float_status(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())

@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.npu_get_float_status(x1)

-x1 = np.random.randn(8).astype(np.float32)
+arr_x1 = np.random.randn(8).astype(np.float32)


 def test_net():
     npu_get_float_status = Net()
-    output = npu_get_float_status(Tensor(x1))
-    print(x1)
+    output = npu_get_float_status(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())

@@ -34,11 +34,11 @@ class Net(nn.Cell):
         return x

-x = np.random.random(size=(2, 2)).astype(np.float32)
+arr_x = np.random.random(size=(2, 2)).astype(np.float32)


 def test_net():
     pad = Net()
-    output = pad(Tensor(x))
+    output = pad(Tensor(arr_x))
     print("=================output====================")
     print(output.asnumpy())

@@ -33,13 +33,13 @@ class Net(nn.Cell):
         return self.realdiv(x1, x2)

-x1 = np.random.randn(3, 4).astype(np.float32)
-x2 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x2 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     realdiv = Net()
-    output = realdiv(Tensor(x1), Tensor(x2))
-    print(x1)
-    print(x2)
+    output = realdiv(Tensor(arr_x1), Tensor(arr_x2))
+    print(arr_x1)
+    print(arr_x2)
     print(output.asnumpy())

@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.reciprocal(x1)

-x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     reciprocal = Net()
-    output = reciprocal(Tensor(x1))
-    print(x1)
+    output = reciprocal(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())

@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.scatternd(indices, update, (3, 3))

-indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
-update = np.array([3.2, 1.1]).astype(np.float32)
+arr_indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
+arr_update = np.array([3.2, 1.1]).astype(np.float32)


 def test_net():
     scatternd = Net()
-    print(indices)
-    print(update)
-    output = scatternd(Tensor(indices), Tensor(update))
+    print(arr_indices)
+    print(arr_update)
+    output = scatternd(Tensor(arr_indices), Tensor(arr_update))
     print(output.asnumpy())

@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.Softmax(x)

-x = np.array([[5, 1]]).astype(np.float32)
+arr_x = np.array([[5, 1]]).astype(np.float32)


 def test_net():
     softmax = Net()
-    output = softmax(Tensor(x))
-    print(x)
+    output = softmax(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())

@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.split(x)

-x = np.random.randn(2, 4).astype(np.float32)
+arr_x = np.random.randn(2, 4).astype(np.float32)


 def test_net():
     split = Net()
-    output = split(Tensor(x))
+    output = split(Tensor(arr_x))
     print("====input========")
-    print(x)
+    print(arr_x)
     print("====output=======")
     print(output)

@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.sqrt(x)

-x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
+arr_x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


 def test_net():
     sqrt = Net()
-    output = sqrt(Tensor(x))
-    print(x)
+    output = sqrt(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())

@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.square(x)

-x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
+arr_x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


 def test_net():
     square = Net()
-    output = square(Tensor(x))
-    print(x)
+    output = square(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())

@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.sub(x, y)

-x = np.random.randn(1, 3, 3, 4).astype(np.float32)
-y = np.random.randn(1, 3, 3, 4).astype(np.float32)
+arr_x = np.random.randn(1, 3, 3, 4).astype(np.float32)
+arr_y = np.random.randn(1, 3, 3, 4).astype(np.float32)


 def test_net():
     sub = Net()
-    output = sub(Tensor(x), Tensor(y))
-    print(x)
-    print(y)
+    output = sub(Tensor(arr_x), Tensor(arr_y))
+    print(arr_x)
+    print(arr_y)
     print(output.asnumpy())

@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.tile(x, (1, 4))

-x = np.array([[0], [1], [2], [3]]).astype(np.int32)
+arr_x = np.array([[0], [1], [2], [3]]).astype(np.int32)


 def test_net():
     tile = Net()
-    print(x)
-    output = tile(Tensor(x))
+    print(arr_x)
+    output = tile(Tensor(arr_x))
     print(output.asnumpy())

@@ -68,7 +68,7 @@ def test_net_3Input():
     addn = Net3I()
     output = addn(Tensor(x, mstype.float32), Tensor(y, mstype.float32), Tensor(z, mstype.float32))
     print("output:\n", output)
     expect_result = [[0., 3., 6.],
                      [9., 12., 15]]
     assert (output.asnumpy() == expect_result).all()

@@ -66,7 +66,7 @@ class Net5(nn.Cell):
 def test_conv2d_backprop_input():
     conv2d_input = Net5()
     output = conv2d_input()
     print("================================")
     # expect output:
     # [[[[ -5, -4, 5, 12, 0, -8]
     #    [-15, -6, 17, 17, -2, -11]

@@ -20,7 +20,6 @@ import mindspore as ms
 from mindspore import Tensor
 from mindspore import context
 from mindspore import nn
-from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
@@ -447,11 +446,14 @@ def test_index_to_switch_layer():
 def test_control_depend_check():
     with pytest.raises(TypeError) as e:
-        depend = P.ControlDepend(0.0)
+        P.ControlDepend(0.0)
+    print(e)
     with pytest.raises(ValueError) as e:
-        depend = P.ControlDepend(2)
+        P.ControlDepend(2)
+    print(e)
     with pytest.raises(TypeError) as e:
-        depend = P.ControlDepend((2,))
+        P.ControlDepend((2,))
+    print(e)


 def test_if_nested_compile():
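Each depend = binding was dead: the constructor is expected to raise, so the assignment never executes and Pylint flags the name as unused (W0612). Dropping the binding and printing the pytest ExceptionInfo after the block keeps the diagnostics. A self-contained sketch of the pattern, with make_op as a hypothetical stand-in for P.ControlDepend:

import pytest

def make_op(arg):
    # Stand-in for P.ControlDepend: reject bad depend modes.
    if not isinstance(arg, int):
        raise TypeError("depend mode must be an int, got %r" % (arg,))
    if arg not in (0, 1):
        raise ValueError("depend mode must be 0 or 1, got %d" % arg)
    return arg

def test_make_op_check():
    # No `result =` binding: the call is expected to raise.
    with pytest.raises(TypeError) as e:
        make_op(0.0)
    print(e)  # the ExceptionInfo is populated once the block exits
    with pytest.raises(ValueError) as e:
        make_op(2)
    print(e)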
@@ -497,7 +499,7 @@ def test_if_inside_for():
     c1 = Tensor(1, dtype=ms.int32)
     c2 = Tensor(1, dtype=ms.int32)
     net = Net()
-    out = net(c1, c2)
+    net(c1, c2)


 def test_while_in_while():
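out = net(c1, c2) became a bare net(c1, c2), a change repeated in many hunks below: these tests only care that compilation and execution succeed, and binding a result that is never read trips Pylint's unused-variable check (W0612). In miniature:

def net(a, b):
    # Stand-in for a compiled Cell; only the side effect of running matters.
    return a + b

net(1, 2)  # call for its side effects; no unused `out` binding left behind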

@@ -31,7 +31,6 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
 from ....mindspore_test_framework.pipeline.forward.verify_exception \
     import pipeline_for_verify_exception_for_case_by_case_config
-from mindspore import context

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)


 def conv3x3(in_channels, out_channels, stride=1, padding=1):
@@ -382,17 +381,18 @@ def test_conv2d_same_primitive():
     class Conv2DSameNet(nn.Cell):
         def __init__(self):
             super(Conv2DSameNet, self).__init__()
-            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1,4), "same", 0, 1, has_bias=True)
-            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1,4), "same", 0, 1, has_bias=True)
+            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
+            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

         def construct(self, x, y):
             r1 = self.conv1(x)
             r2 = self.conv2(y)
             return (r1, r2)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
-    out = net(t1, t2)
+    net(t1, t2)


 class ComparisonNet(nn.Cell):
     def __init__(self):
         """ ComparisonNet definition """

@@ -13,30 +13,14 @@
 # limitations under the License.
 # ============================================================================
 """ test nn ops """
-import functools
 import numpy as np
-import mindspore
 import mindspore.nn as nn
 import mindspore.context as context
-import mindspore.common.dtype as mstype
-from mindspore import Tensor, Parameter
-from mindspore.common.initializer import initializer
-from mindspore.ops import Primitive
-from mindspore.ops import composite as C
-from mindspore.ops import operations as P
+from mindspore import Tensor
 from mindspore.ops import functional as F
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops.primitive import constexpr
-from ..ut_filter import non_graph_engine
-from ....mindspore_test_framework.mindspore_test import mindspore_test
-from ....mindspore_test_framework.pipeline.forward.compile_forward \
-    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
-from ....mindspore_test_framework.pipeline.forward.verify_exception \
-    import pipeline_for_verify_exception_for_case_by_case_config
-from mindspore import context

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)


 class FakeOp(PrimitiveWithInfer):
@@ -57,16 +41,16 @@ def test_conv2d_same_primitive():
     class Conv2DSameNet(nn.Cell):
         def __init__(self):
             super(Conv2DSameNet, self).__init__()
-            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1,4), "same", 0, 1, has_bias=True)
-            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1,4), "same", 0, 1, has_bias=True)
+            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
+            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

         def construct(self, x, y):
             r1 = self.conv1(x)
             r2 = self.conv2(y)
             return (r1, r2)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
-    out = net(t1, t2)
+    net(t1, t2)


 # test cell as high order argument
 # The graph with free variables used as argument is not supported yet
@@ -87,10 +71,10 @@ def Xtest_conv2d_op_with_arg():
         a = self.opnet(conv_op, x)
         b = self.opnet(conv_op, y)
         return (a, b)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(Conv2dNet())
-    out = net(t1, t2)
+    net(t1, t2)


 def test_conv2d_op_with_arg():
@@ -115,11 +99,10 @@ def test_conv2d_op_with_arg():
         a = self.opnet(op, x, y)
         b = self.opnet(op, y, x)
         return (a, b)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
-    out = net(t1, t2)
+    net(t1, t2)


 def test_conv2d_op_with_arg_same_input():
@@ -144,10 +127,10 @@ def test_conv2d_op_with_arg_same_input():
         a = self.opnet(op, x, x)
         b = self.opnet(op, y, x)
         return (a, b)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
-    out = net(t1, t2)
+    net(t1, t2)


 # test op with partial
 def test_op_as_partial():
@@ -160,11 +143,11 @@ def test_op_as_partial():
         a = partial_op(y)
         b = partial_op(z)
         return a, b
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 # test op with partial
 def test_op_as_partial_inside():
@@ -182,13 +165,14 @@ def test_op_as_partial_inside():
             super(OuterNet, self).__init__()
             self.net = OpAsPartial()

         def construct(self, x, y, z):
-            a,b = self.net(x, y, z)
+            a, b = self.net(x, y, z)
             return a, b
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OuterNet()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 # test op with partial case 2
 def test_op_as_partial_independent():
@@ -202,11 +186,12 @@ def test_op_as_partial_independent():
         partial_op2 = F.partial(self.op, x)
         b = partial_op2(z)
         return a, b
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 def test_nest_partial():
     class NestPartial(nn.Cell):
@@ -221,11 +206,11 @@ def test_nest_partial():
         partial_op4 = F.partial(partial_op3, x)
         b = partial_op4(z)
         return a, b
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = NestPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 # high order argument
 # op and op args as network arguments
@@ -245,11 +230,11 @@ def test_op_with_arg_as_input():
         a = self.opnet(op, x, z)
         b = self.opnet(op, x, y)
         return (a, b)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(WithOpArgNet())
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 # The partial application used as argument is not supported yet
 # because of the limit of inference specialize system
@@ -269,8 +254,8 @@ def Xtest_partial_as_arg():
         a = self.partial_net(partial_op, z)
         b = self.partial_net(partial_op, y)
         return (a, b)
-    t1 = Tensor(np.ones([1,16,1,1918]).astype(np.float32))
-    t2 = Tensor(np.ones([1,16,1,3840]).astype(np.float32))
-    t3 = Tensor(np.ones([1,16,1,1234]).astype(np.float32))
+    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
+    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
+    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(PartialArgNet())
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)

@@ -982,7 +982,7 @@ def test_bprop_with_wrong_output_shape():
 @bprop_getters.register(BpropWithWrongOutputShape)
 def get_bprop_with_wrong_output_shape(self):
     """Generate bprop for BpropWithWrongOutputShape"""
-    ones = Tensor(np.ones([2, ]).astype(np.int32))
+    ones = Tensor(np.ones([2,]).astype(np.int32))

     def bprop(x, out, dout):
         return (ones,)
