!1426 Fix the issues checked by pylint

Merge pull request !1426 from chengang/fix_pylint
pull/1426/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit e5c7ecfd46
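For context, the hunks below mostly silence a handful of standard pylint messages: invalid-name (snake_case for locals and test functions), unused-import, unused-variable, redefined-builtin, superfluous-parens around assert conditions, unnecessary-semicolon, pointless-string-statement, and continuation-line formatting. A minimal, hypothetical snippet (not from the MindSpore tree) that triggers the same kinds of warnings, with the standard pylint message ids noted in comments:

import os  # W0611 (unused-import): imported but never used


def Compute():  # C0103 (invalid-name): function names should be snake_case
    """Trigger several of the checks addressed in this PR."""
    first, second = 1, 2   # W0612 (unused-variable): 'second' is never read
    assert (first == 1)    # C0325 (superfluous-parens): 'assert' is a statement, not a call
    total = first + 1;     # W0301 (unnecessary-semicolon)
    """expected: 2"""      # W0105 (pointless-string-statement): string used as a comment
    return total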

@@ -48,15 +48,15 @@ def test_argmax():
expect2 = np.array([1, 0, 0, 0]).astype(np.int32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
Argmax = NetArgmax()
output = Argmax(x)
argmax = NetArgmax()
output = argmax(x)
assert (output[0].asnumpy() == expect1).all()
assert (output[1].asnumpy() == expect2).all()
assert (output[2].asnumpy() == expect2).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
Argmax1 = NetArgmax()
output1 = Argmax(x)
argmax1 = NetArgmax()
output1 = argmax1(x)
assert (output1[0].asnumpy() == expect1).all()
assert (output1[1].asnumpy() == expect2).all()
assert (output1[2].asnumpy() == expect2).all()
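The locals Argmax/Argmax1 are lower-cased to satisfy pylint's variable-name pattern, and the graph-mode branch now calls the instance it just created (argmax1) instead of reusing the PyNative one. A minimal sketch of the naming convention, using a hypothetical stand-in class:

class NetArgmaxLike:  # hypothetical stand-in for a Cell wrapping P.Argmax
    def __call__(self, data):
        return max(range(len(data)), key=data.__getitem__)


argmax = NetArgmaxLike()             # snake_case local, no invalid-name warning
assert argmax([1.0, 3.0, 2.0]) == 1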

@@ -23,13 +23,13 @@ from mindspore.ops import operations as P
class Net(nn.Cell):
def __init__(self, value):
def __init__(self, param):
super(Net, self).__init__()
self.var = Parameter(value, name="var")
self.var = Parameter(param, name="var")
self.assign = P.Assign()
def construct(self, value):
return self.assign(self.var, value)
def construct(self, param):
return self.assign(self.var, param)
x = np.array([[1.2, 1], [1, 0]]).astype(np.float32)
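Renaming the arguments from value to param most plausibly avoids a name that shadows something in an enclosing scope (pylint's redefined-outer-name); the exact trigger is not visible in this hunk, so the sketch below is a generic illustration with made-up names:

value = 10  # module-level name


def scale(value):  # W0621 (redefined-outer-name): argument shadows the global 'value'
    return value * 2


def scale_fixed(param):  # renaming the argument keeps the scopes distinct
    return param * 2


assert scale(3) == 6 and scale_fixed(3) == 6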

@@ -20,9 +20,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
@@ -38,7 +35,7 @@ class BatchMatMulNet(nn.Cell):
return self.batch_matmul(x, y)
def test_4D():
def test_4d():
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
@@ -60,7 +57,7 @@ def test_4D():
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_a():
def test_4d_transpose_a():
input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
@@ -82,7 +79,7 @@ def test_4D_transpose_a():
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_b():
def test_4d_transpose_b():
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
@@ -104,7 +101,7 @@ def test_4D_transpose_b():
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_transpose_ab():
def test_4d_transpose_ab():
input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
@@ -122,17 +119,7 @@ def test_4D_transpose_ab():
[[5612, 5810, 6008, 6206]]]]
assert (output.asnumpy() == expect).all()
class BatchMatMulNet(nn.Cell):
def __init__(self, transpose_a=False, transpose_b=False):
super(BatchMatMulNet, self).__init__()
self.batch_matmul = P.BatchMatMul(transpose_a, transpose_b)
def construct(self, x, y):
return self.batch_matmul(x, y)
def test_4D_fp16():
def test_4d_fp16():
input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)
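This hunk drops a second, identical BatchMatMulNet definition (pylint reports redefinitions of a class or function within one module) and lower-cases test_4D_fp16; pytest collection is unaffected because it keys off the test_ prefix, not the case of the rest of the name. A small sketch of the redefinition warning, with hypothetical names:

class Worker:  # first definition
    def run(self):
        return 1


class Worker:  # E0102 (function-redefined): 'Worker' already defined above
    def run(self):
        return 2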

@@ -68,10 +68,10 @@ def test_batchnrom_fold2():
current_step = np.array([0]).astype('int32')
output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean),
Tensor(running_std), Tensor(running_mean), Tensor(current_step))
expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1,
1) if current_step >= freeze_bn else
x * (running_std / batch_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1,
1))
expect = ((x + beta.reshape(-1, 1, 1) -
(gamma * running_mean / running_std).reshape(-1, 1, 1) if current_step >= freeze_bn else
x * (running_std / batch_std).reshape(-1, 1, 1) +
(beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1)))
error = np.ones(shape=expect.shape) * 1.0e-6
diff = output.asnumpy() - expect
assert np.all(diff < error)
@@ -80,10 +80,9 @@ def test_batchnrom_fold2():
current_step = np.array([100000]).astype('int32')
output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std),
Tensor(running_mean), Tensor(current_step))
expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1,
1) if current_step >= freeze_bn else
x * (batch_std / running_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1,
1))
expect = ((x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, 1)
if current_step >= freeze_bn else x * (batch_std / running_std).reshape(-1, 1, 1) +
(beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1)))
error = np.ones(shape=expect.shape) * 1.0e-6
diff = output.asnumpy() - expect
assert np.all(diff < error)
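Both expect expressions are re-wrapped so the conditional expression lives inside a single pair of parentheses with consistently indented continuation lines, the usual way to satisfy pylint's line-length and continuation checks. A short sketch of the pattern with made-up arrays:

import numpy as np

x = np.ones((2, 3, 3), dtype=np.float32)
beta = np.zeros(2, dtype=np.float32)
scale = np.full(2, 2.0, dtype=np.float32)
frozen = True

# one enclosing pair of parentheses lets the conditional expression span lines
# without backslashes or awkward hanging indents
expect = (x + beta.reshape(-1, 1, 1) if frozen
          else x * scale.reshape(-1, 1, 1))
assert expect.shape == (2, 3, 3)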

@@ -38,8 +38,8 @@ class Net(nn.Cell):
def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std):
n = x.shape[0] * x.shape[2] * x.shape[3]
dx = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * (
x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n
dx = (d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) *
(x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n)
return dx

@@ -86,7 +86,7 @@ def test_batchnorm_fold2():
ms_var = Tensor(variance)
batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,
Tensor(current_step))
expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
expect1, expect2, expect3, _, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5)
assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, atol=1.e-5)
assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5)
@@ -108,7 +108,7 @@ def test_batchnorm_fold_freeze():
ms_var = Tensor(variance)
batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,
Tensor(current_step))
expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
_, _, _, _, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
assert np.allclose(batch_mean.asnumpy(), np.zeros_like(mean), rtol=1.e-7, atol=1.e-5)
assert np.allclose(batch_var.asnumpy(), np.ones_like(mean), rtol=1.e-7, atol=1.e-5)
assert np.allclose(ms_mean.asnumpy(), mean, rtol=1.e-7, atol=1.e-5)
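Unpacking the unused results of np_result into _ silences unused-variable while keeping the arity of the call explicit. A minimal sketch:

def stats():
    return 1.0, 2.0, 3.0


mean, _, _ = stats()  # only 'mean' is checked; '_' marks intentionally unused slots
assert mean == 1.0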

@@ -61,9 +61,6 @@ def test_train_forward():
[-0.0281, 0.9119, 1.3819, 1.8518],
[2.7918, 0.4419, -0.4981, 0.9119],
[1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)
grad = np.array([[
[[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
[[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)
weight = np.ones(2).astype(np.float32)
bias = np.ones(2).astype(np.float32)

@@ -16,10 +16,8 @@
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as P

@@ -47,9 +47,9 @@ def test_cast():
net = Net()
output = net(x0, t0, x1, t1)
type0 = output[0].asnumpy().dtype
assert (type0 == 'float16')
assert type0 == 'float16'
type1 = output[1].asnumpy().dtype
assert (type1 == 'float32')
assert type1 == 'float32'
@pytest.mark.level0
@@ -65,6 +65,6 @@ def test_cast1():
net = Net()
output = net(x0, t0, x1, t1)
type0 = output[0].asnumpy().dtype
assert (type0 == 'float32')
assert type0 == 'float32'
type1 = output[1].asnumpy().dtype
assert (type1 == 'float32')
assert type1 == 'float32'
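assert is a statement, so pylint flags assert (cond) with superfluous-parens; dropping the parentheses does not change behavior. A tiny sketch:

dtype_name = 'float16'
assert (dtype_name == 'float16')  # C0325 (superfluous-parens) under default settings
assert dtype_name == 'float16'    # equivalent, warning-free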

@@ -66,13 +66,6 @@ def test_conv2d_backprop_filter():
[-3, -2, -3, -16]]]]).astype(np.float32))
conv2d_filter = Conv2dFilter()
output = conv2d_filter(out, x, w)
print("================================")
"""
expect output:
[[[[ -60, -142, -265]
[-104, -211, -322]
[-102, -144, -248]]]]
"""
expect = np.array([[[[-60, -142, -265],
[-104, -211, -322],
[-102, -144, -248]]]]).astype(np.float32)
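The removed triple-quoted block was a comment in disguise: a free-standing string literal is evaluated and discarded, so pylint reports it as a pointless statement, and the stray debug print goes with it. A tiny sketch with a hypothetical function:

def check():
    """Real docstring: fine."""
    result = 3
    """expected: 3"""  # W0105 (pointless-string-statement): string expression with no effect
    return result


assert check() == 3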

@@ -20,8 +20,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target='GPU')

@@ -57,16 +57,6 @@ def test_conv2d():
conv2d = NetConv2d()
output = conv2d(x, w)
assert (output.asnumpy() == expect).all()
"""
expect output:
[[[[ 45. 48. 51.]
[ 54. 57. 60.]
[ 63. 66. 69.]]
[[126. 138. 150.]
[162. 174. 186.]
[198. 210. 222.]]]]
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
conv2d = NetConv2d()
output = conv2d(x, w)

@@ -14,7 +14,6 @@
# ============================================================================
import numpy as np
import os
import pytest
import mindspore.context as context
@@ -51,5 +50,5 @@ def test_correction_mul_grad():
expect = [0, 0]
expect[0] = (dout * np.reshape(batch_std / running_std, (co, 1, 1, 1)))
expect[1] = (np.sum(dout * x, (1, 2, 3)) / running_std)
for i, v in enumerate(output):
assert (np.allclose(output[i].asnumpy(), expect[i], rtol=1.e-5, atol=1.e-5))
for i, _ in enumerate(output):
assert np.allclose(output[i].asnumpy(), expect[i], rtol=1.e-5, atol=1.e-5)

@@ -50,4 +50,4 @@ def test_correction_mul():
diff = output.asnumpy() - expect
assert np.all(diff < error)
assert np.all(diff > error * -1)
assert (output.shape() == expect.shape)
assert output.shape() == expect.shape

@@ -68,8 +68,8 @@ class GradData(nn.Cell):
self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
self.network = network
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, inputs, output_grad):
return self.grad(self.network)(inputs, output_grad)
class GradWeight(nn.Cell):
@@ -172,8 +172,8 @@ class Grad(nn.Cell):
self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
self.network = network
def construct(self, input, bias, dy):
return self.grad(self.network)(input, bias, dy)
def construct(self, inputs, bias, dy):
return self.grad(self.network)(inputs, bias, dy)
@pytest.mark.level0
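input is a Python builtin, so using it as a parameter name trips redefined-builtin; renaming to inputs keeps the builtin callable reachable inside the method. A sketch with hypothetical functions:

def forward(input):  # W0622 (redefined-builtin): shadows the built-in input()
    return input * 2


def forward_fixed(inputs):
    return inputs * 2


assert forward(2) == forward_fixed(2) == 4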

@@ -50,16 +50,16 @@ def test_equal():
equal = NetEqual()
output0 = equal(x0, y0)
assert np.all(output0.asnumpy() == expect0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
output1 = equal(x1, y1)
assert np.all(output1.asnumpy() == expect1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
equal = NetEqual()
output0 = equal(x0, y0)
assert np.all(output0.asnumpy() == expect0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
output1 = equal(x1, y1)
assert np.all(output1.asnumpy() == expect1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape

@@ -49,19 +49,19 @@ def test_exp():
output0 = exp(x0)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
output1 = exp(x1)
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
exp = NetExp()
output0 = exp(x0)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
output1 = exp(x1)
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape

@@ -19,7 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
@@ -38,11 +37,6 @@ class NetFlattenGrad(nn.Cell):
@pytest.mark.env_onecard
def test_flatten_grad():
x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32))
"""
expect output:
[ [-0.1 0.3 3.6]
[ 0.4 0.5 -3.2] ]
"""
expect = np.array([[-0.1, 0.3, 3.6],
[0.4, 0.5, -3.2]]).astype(np.float32)

@@ -37,11 +37,6 @@ class NetFlatten(nn.Cell):
def test_flatten():
x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32))
expect = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32)
"""
expect output:
[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
flatten = NetFlatten()
output = flatten(x)

@@ -68,7 +68,7 @@ x3 = np.array([[1, 2], [3, 4], [5.0, 88.0]]).astype(np.float32)
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_status():
ms_status = Net();
ms_status = Net()
output1 = ms_status(Tensor(x1))
output2 = ms_status(Tensor(x2))
output3 = ms_status(Tensor(x3))
@@ -84,7 +84,7 @@ def test_status():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nan():
ms_isnan = Netnan();
ms_isnan = Netnan()
output1 = ms_isnan(Tensor(x1))
output2 = ms_isnan(Tensor(x2))
output3 = ms_isnan(Tensor(x3))
@@ -100,7 +100,7 @@ def test_nan():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_inf():
ms_isinf = Netinf();
ms_isinf = Netinf()
output1 = ms_isinf(Tensor(x1))
output2 = ms_isinf(Tensor(x2))
output3 = ms_isinf(Tensor(x3))
@@ -116,7 +116,7 @@ def test_inf():
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_finite():
ms_isfinite = Netfinite();
ms_isfinite = Netfinite()
output1 = ms_isfinite(Tensor(x1))
output2 = ms_isfinite(Tensor(x2))
output3 = ms_isfinite(Tensor(x3))

@@ -913,16 +913,16 @@ class GatherNet2(nn.Cell):
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gather2():
x = Tensor(np.array([[4., 5., 4., 1., 5., ],
[4., 9., 5., 6., 4., ],
[9., 8., 4., 3., 6., ],
[0., 4., 2., 2., 8., ],
[1., 8., 6., 2., 8., ],
[8., 1., 9., 7., 3., ],
[7., 9., 2., 5., 7., ],
[9., 8., 6., 8., 5., ],
[3., 7., 2., 7., 4., ],
[4., 2., 8., 2., 9., ]]
x = Tensor(np.array([[4., 5., 4., 1., 5.,],
[4., 9., 5., 6., 4.,],
[9., 8., 4., 3., 6.,],
[0., 4., 2., 2., 8.,],
[1., 8., 6., 2., 8.,],
[8., 1., 9., 7., 3.,],
[7., 9., 2., 5., 7.,],
[9., 8., 6., 8., 5.,],
[3., 7., 2., 7., 4.,],
[4., 2., 8., 2., 9.,]]
).astype(np.float32))
indices = Tensor(np.array([[4000, 1, 300000]]).astype(np.int32))

@@ -19,8 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

@@ -50,10 +50,10 @@ def test_log():
output1 = log(x1)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
log = NetLog()
@@ -61,7 +61,7 @@ def test_log():
output1 = log(x1)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape

@@ -27,8 +27,8 @@ class NetAnd(Cell):
super(NetAnd, self).__init__()
self.logicaland = P.LogicalAnd()
def construct(self, x, y):
return self.logicaland(x, y)
def construct(self, input_x, input_y):
return self.logicaland(input_x, input_y)
class NetOr(Cell):
@@ -36,8 +36,8 @@ class NetOr(Cell):
super(NetOr, self).__init__()
self.logicalor = P.LogicalOr()
def construct(self, x, y):
return self.logicalor(x, y)
def construct(self, input_x, input_y):
return self.logicalor(input_x, input_y)
class NetNot(Cell):
@@ -45,8 +45,8 @@ class NetNot(Cell):
super(NetNot, self).__init__()
self.logicalnot = P.LogicalNot()
def construct(self, x):
return self.logicalnot(x)
def construct(self, input_x):
return self.logicalnot(input_x)
x = np.array([True, False, False]).astype(np.bool)

@@ -35,8 +35,8 @@ def test_logsoftmax():
[-3.452001, -1.2546989, -1.4618242, -0.79552734]]).astype(np.float32)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
LogSoftmax = P.LogSoftmax()
output = LogSoftmax(Tensor(x))
logSoftmax = P.LogSoftmax()
output = logSoftmax(Tensor(x))
assert np.allclose(output.asnumpy(), expect)
@@ -134,7 +134,7 @@ def test_logsoftmaxgrad1():
[-0.01768187, 0.26872346, -0.5037259, -0.3376058, -0.3291146, 1.4752979, -0.25972134, 0.8869053,
0.25325722, -0.13946185],
[-0.5247209, 0.70192003, -1.0808672, 1.4858199, -1.1273282, 0.20728993, 0.38918605, 0.08162117,
0.10445589, 0.3220427]], ).astype(np.float32)
0.10445589, 0.3220427]],).astype(np.float32)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = LogSoftmax(0)

Some files were not shown because too many files have changed in this diff.