clean pylint

Ref: pull/1532/head
Author: jinyaohui (5 years ago)
Parent: 85e686e0b3
Commit: 86d197dfeb

@@ -244,8 +244,8 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
return True
# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements,
# pylint: disable=inconsistent-return-statements
# @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str)
@op_info_register(matmul_cube_dense_left_op_info)
def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,

@@ -40,6 +40,7 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
.get_op_info()
# pylint: disable=inconsistent-return-statements
@op_info_register(matmul_cube_dense_right_op_info)
def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
kernel_name="matmulcube"):
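
Note on the two hunks above: they only add pylint message-control comments. A module-level "# pylint: disable=..." suppresses the named checks from that point onward, while the trailing-comment form confines the suppression to the line it sits on. A minimal, self-contained sketch of both forms, using hypothetical helper functions that are not part of this commit:

# pylint: disable=too-many-arguments
# Block-level pragma: suppresses R0913 for the rest of this module.
def scale_and_shift(x, scale, shift, clip_min, clip_max, rounding=None):
    """Hypothetical helper, present only to illustrate the pragma."""
    y = max(clip_min, min(clip_max, x * scale + shift))
    return round(y) if rounding else y

def lookup(table, key):  # pylint: disable=inconsistent-return-statements
    # Trailing-comment form: pylint reports R1710 on the def line, so the
    # inline disable here is enough to silence it for this function.
    if key in table:
        return table[key]
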

@@ -31,6 +31,8 @@ from .transformer_model import TransformerModel
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 5.0
# pylint: disable=consider-using-in
class ClipGradients(nn.Cell):
"""
Clip gradients.
@@ -48,6 +50,7 @@ class ClipGradients(nn.Cell):
self.clip_by_norm = nn.ClipByNorm()
self.cast = P.Cast()
self.dtype = P.DType()
def construct(self,
grads,
clip_type,
@@ -217,10 +220,12 @@ class TransformerTrainOneStepCell(nn.Cell):
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * F.cast(reciprocal(scale), F.dtype(grad))
class TransformerTrainOneStepWithLossScaleCell(nn.Cell):
"""
Encapsulation class of Transformer network training.

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
_nn_clip_by_norm = nn.ClipByNorm()
clip_grad = C.MultitypeFuncGraph("clip_grad")
# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
return new_grad
class GetMaskedLMOutput(nn.Cell):
"""
Get masked lm output.
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
name="loss_scale")
self.add_flags(has_effect=True)
def construct(self,
input_ids,
input_mask,

@@ -15,14 +15,15 @@
"""Test bert submodules."""
import numpy as np
import os
from mindspore import Tensor
from mindspore import nn, context
import numpy as np
from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
BertConfig, BertPreTraining, BertNetworkWithLoss
from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
from mindspore import Tensor
from mindspore import nn, context
from ..mindspore_test import mindspore_test
from ..pipeline.forward.compile_forward import pipeline_for_compile_forward_anf_graph_for_case_by_case_config, \
pipeline_for_compile_forward_ge_graph_for_case_by_case_config

@@ -15,9 +15,10 @@
"""Component that Check if the function raises the expected Exception."""
import pytest
import sys
import pytest
from ...components.icomponent import IExectorComponent
from ...utils import keyword

@@ -16,9 +16,10 @@
"""Implementation of Numerical gradients checking."""
# pylint: disable=missing-docstring
from typing import Callable, List, Any
import mindspore._c_expression as _c_expression
import numpy as np
from typing import Callable, List, Any
from mindspore import ParameterTuple
from mindspore import Tensor

@@ -15,9 +15,10 @@
"""Dataset utils."""
import numpy as np
import random
import numpy as np
from mindspore import Tensor

@@ -24,8 +24,7 @@ from mindspore.ops import operations as P
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
logging.basicConfig(level=logging.DEBUG, format=
'[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
logger = logging.getLogger(__name__)

@@ -14,9 +14,8 @@
# ============================================================================
"""Other utils."""
import mindspore._c_expression as _c_expression
import numpy as np
import mindspore._c_expression as _c_expression
from mindspore.common.tensor import Tensor
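
Several of the surrounding hunks only reorder imports: pylint's wrong-import-order check (C0411) expects standard-library modules first, then third-party packages, then first-party/project packages, with each group separated by a blank line. A minimal sketch of the grouping pylint accepts, reusing module names that appear in this diff (only the ordering matters here):

# Standard library first.
import os
import random
import sys
from typing import Any, Callable, List

# Third-party packages second.
import numpy as np

# First-party / project packages last.
from mindspore import Tensor
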

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
_nn_clip_by_norm = nn.ClipByNorm()
clip_grad = C.MultitypeFuncGraph("clip_grad")
# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
return new_grad
class GetMaskedLMOutput(nn.Cell):
"""
Get masked lm output.
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
name="loss_scale")
self.add_flags(has_effect=True)
def construct(self,
input_ids,
input_mask,

@@ -23,10 +23,12 @@ from mindspore.ops import functional as F, composite as C
import mindspore.context as context
import pytest
class TensorIntAutoCast(nn.Cell):
def __init__(self, ):
super(TensorIntAutoCast, self).__init__()
self.i = 2
def construct(self, t):
z = F.tensor_mul(t, self.i)
return z
@@ -36,6 +38,7 @@ class TensorFPAutoCast(nn.Cell):
def __init__(self, ):
super(TensorFPAutoCast, self).__init__()
self.f = 1.2
def construct(self, t):
z = F.tensor_mul(t, self.f)
return z
@@ -45,13 +48,16 @@ class TensorBoolAutoCast(nn.Cell):
def __init__(self, ):
super(TensorBoolAutoCast, self).__init__()
self.f = True
def construct(self, t):
z = F.tensor_mul(t, self.f)
return z
class TensorAutoCast(nn.Cell):
def __init__(self, ):
super(TensorAutoCast, self).__init__()
def construct(self, t1, t2):
z = F.tensor_mul(t1, t2)
return z
@@ -210,7 +216,6 @@ def test_tensor_auto_cast():
with pytest.raises(TypeError):
net(t_uint64, t_fp64)
with pytest.raises(TypeError):
tfp(t_uint16)
with pytest.raises(TypeError):

@@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore import context
context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -29,6 +30,7 @@ def test_cast_op_attr():
def __init__(self):
super(CastNet, self).__init__()
self.cast = P.Cast()
def construct(self, x, t):
return self.cast(x, t)
@@ -37,6 +39,7 @@ def test_cast_op_attr():
super(CastTypeTest, self).__init__()
self.net = net
self.cast = P.Cast()
def construct(self, x, y, z):
cast_op = self.cast
t1 = cast_op(x, mstype.float32)
@@ -46,6 +49,7 @@ def test_cast_op_attr():
t4 = cast_net(y, mstype.int32)
t5 = cast_net(z, mstype.float16)
return (t1, t2, t3, t4, t5)
net = CastTypeTest(CastNet())
t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))

@@ -142,4 +142,6 @@ def test_transpose():
assert (output[1].asnumpy() == expect1).all()
assert (output[2].asnumpy() == expect2).all()
assert (output[3].asnumpy() == expect3).all()
test_transpose()

@@ -1043,6 +1043,7 @@ def test_print_tuple_wrapper(tag):
return fns[tag]
# pylint: disable=unnecessary-semicolon
def test_constant_duplicate_mul(tag):
fns = FnDict()
Mul = Primitive('Mul');

@@ -152,7 +152,7 @@ def test_dict_set_item():
x = Tensor(np.ones([2, 2, 3], np.float32))
net = DictSetNet()
out = net(x)
_ = net(x)
# if the dictionary item does not exist, create a new one
@@ -168,4 +168,4 @@ def test_dict_set_item_create_new():
return my_dict
x = Tensor(np.ones([2, 2, 3], np.float32))
net = DictSetNet()
out = net(x)
_ = net(x)
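
The test changes above and below replace result variables that are never read (out, result, net) with a bare underscore, the conventional way to tell pylint that a return value is deliberately ignored and to silence unused-variable (W0612). A small self-contained sketch with a hypothetical function:

def run_forward(x):
    """Hypothetical stand-in for a network call made only for its side effects."""
    print("forward pass on", x)
    return x * 2

# Before: pylint reports W0612 because out is never read.
# out = run_forward(3)

# After: assigning to _ documents that the result is intentionally discarded.
_ = run_forward(3)
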

@@ -81,31 +81,3 @@ def test_hypermap_func_const():
net = NetMap()
assert net() == (8, 12, 16)
"""
def test_hypermap_func_variable():
class NetMap(Cell):
def __init__(self):
super(NetMap, self).__init__()
def double(self, x):
return 2 * x
def triple(self, x):
return 3 * x
def square(self, x):
return x * x
def construct(self, x):
_list = [self.double, self.triple, self.square]
return map(lambda f: f(x), _list)
x = Tensor(np.ones([3, 2, 3], np.float32))
net = NetMap()
with pytest.raises(RuntimeError) as ex:
net(x)
assert "HyperMap don't support Closure with free variable yet" in str(ex.value)
"""

@@ -99,4 +99,4 @@ def test_assignadd_scalar_cast():
net = AssignAddNet()
x = Tensor(np.ones([1]).astype(np.int64) * 102)
# _executor.compile(net, 1)
result = net(x)
_ = net(x)

@@ -429,9 +429,9 @@ def test_tensor_dtype_np_int64():
def test_tensor_dtype_fp32_to_bool():
with pytest.raises(RuntimeError):
input = np.random.randn(2, 3, 4, 5).astype(np.float32)
input = ms.Tensor(input)
input_me = ms.Tensor(input, dtype=ms.bool_)
input_ = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_ = ms.Tensor(input_)
_ = ms.Tensor(input_, dtype=ms.bool_)
def test_tensor_operation():

@@ -41,10 +41,10 @@ class Func(nn.Cell):
def construct(self, x, y):
init = self.alloc_status()
sum = add(x, y)
sum_ = add(x, y)
product = mul1(x, y)
flag = self.get_status(init)
out = add2(sum, product)
out = add2(sum_, product)
clear = self.clear_status(flag)
out = F.depend(out, clear)
return out
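
The renames just above (sum to sum_) and in the earlier tensor tests (input to input_) address pylint's redefined-builtin warning (W0622): binding a name that shadows a Python built-in hides the original callable for the rest of the scope. A minimal sketch of the problem and of the trailing-underscore fix recommended by PEP 8:

values = [1.5, 2.5, 3.0]

# Before: after this assignment, sum no longer refers to the built-in, so a
# later sum(...) call in the same scope raises
# TypeError: 'float' object is not callable.
# sum = sum(values)

# After: the trailing underscore keeps the built-in usable.
sum_ = sum(values)
print(sum_)             # 7.0
print(sum(values[:2]))  # 4.0 -- built-in still available
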
@@ -88,7 +88,7 @@ def test_sens():
sens = Tensor(np.ones([3, 3]).astype(np.float32))
net = Net()
net.add_flags(has_effect=True)
out = net(x, y, sens)
_ = net(x, y, sens)
class Net_hyper(nn.Cell):
@@ -119,7 +119,7 @@ def test_hyper_add():
sens = Tensor(np.ones([3, 3]).astype(np.float32))
net = Net_hyper()
net.add_flags(has_effect=True)
out = net(x, y, sens)
_ = net(x, y, sens)
def test_keep_order_io_effect_exception_return_dtype():

@@ -148,9 +148,6 @@ def test_cast():
_executor.compile(net, x)
"""test grad of PReLU, which cause AddN(generated by grad) fail"""
class IRBlockZ(nn.Cell):
def __init__(self, inplanes, planes):
super(IRBlockZ, self).__init__()

@@ -46,6 +46,7 @@ class MaxNet(nn.Cell):
kernel_size,
stride=None,
padding=0):
_ = padding
super(MaxNet, self).__init__()
self.maxpool = nn.MaxPool2d(kernel_size,
stride)
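
Assigning an unused parameter to _ at the top of the body, as in the MaxNet change above, acknowledges an argument that is kept for interface compatibility and silences pylint's unused-argument warning (W0613) without altering the signature. A short sketch with a hypothetical class:

class Pool:
    """Hypothetical pooling wrapper; padding is accepted for API compatibility only."""

    def __init__(self, kernel_size, stride=None, padding=0):
        _ = padding  # explicitly discarded, which satisfies W0613 (unused-argument)
        self.kernel_size = kernel_size
        self.stride = stride if stride is not None else kernel_size

pool = Pool(kernel_size=2, padding=1)
print(pool.kernel_size, pool.stride)
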
@@ -73,5 +74,5 @@ class Avg1dNet(nn.Cell):
def test_avg1d():
net = Avg1dNet(6, 1)
input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
_executor.compile(net, input)
input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
_executor.compile(net, input_)

@@ -52,19 +52,19 @@ def test_compile_psnr_grayscale():
def test_psnr_max_val_negative():
max_val = -1
with pytest.raises(ValueError):
net = PSNRNet(max_val)
_ = PSNRNet(max_val)
def test_psnr_max_val_bool():
max_val = True
with pytest.raises(TypeError):
net = PSNRNet(max_val)
_ = PSNRNet(max_val)
def test_psnr_max_val_zero():
max_val = 0
with pytest.raises(ValueError):
net = PSNRNet(max_val)
_ = PSNRNet(max_val)
def test_psnr_different_shape():

@@ -51,59 +51,59 @@ def test_compile_grayscale():
def test_ssim_max_val_negative():
max_val = -1
with pytest.raises(ValueError):
net = SSIMNet(max_val)
_ = SSIMNet(max_val)
def test_ssim_max_val_bool():
max_val = True
with pytest.raises(TypeError):
net = SSIMNet(max_val)
_ = SSIMNet(max_val)
def test_ssim_max_val_zero():
max_val = 0
with pytest.raises(ValueError):
net = SSIMNet(max_val)
_ = SSIMNet(max_val)
def test_ssim_filter_size_float():
with pytest.raises(TypeError):
net = SSIMNet(filter_size=1.1)
_ = SSIMNet(filter_size=1.1)
def test_ssim_filter_size_zero():
with pytest.raises(ValueError):
net = SSIMNet(filter_size=0)
_ = SSIMNet(filter_size=0)
def test_ssim_filter_sigma_zero():
with pytest.raises(ValueError):
net = SSIMNet(filter_sigma=0.0)
_ = SSIMNet(filter_sigma=0.0)
def test_ssim_filter_sigma_negative():
with pytest.raises(ValueError):
net = SSIMNet(filter_sigma=-0.1)
_ = SSIMNet(filter_sigma=-0.1)
def test_ssim_k1_k2_wrong_value():
with pytest.raises(ValueError):
net = SSIMNet(k1=1.1)
_ = SSIMNet(k1=1.1)
with pytest.raises(ValueError):
net = SSIMNet(k1=1.0)
_ = SSIMNet(k1=1.0)
with pytest.raises(ValueError):
net = SSIMNet(k1=0.0)
_ = SSIMNet(k1=0.0)
with pytest.raises(ValueError):
net = SSIMNet(k1=-1.0)
_ = SSIMNet(k1=-1.0)
with pytest.raises(ValueError):
net = SSIMNet(k2=1.1)
_ = SSIMNet(k2=1.1)
with pytest.raises(ValueError):
net = SSIMNet(k2=1.0)
_ = SSIMNet(k2=1.0)
with pytest.raises(ValueError):
net = SSIMNet(k2=0.0)
_ = SSIMNet(k2=0.0)
with pytest.raises(ValueError):
net = SSIMNet(k2=-1.0)
_ = SSIMNet(k2=-1.0)
def test_ssim_different_shape():

@@ -64,13 +64,13 @@ class BatchNormTester(nn.Cell):
def test_batchnorm_train_onnx_export():
"test onnx export interface does not modify trainable flag of a network"
input = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
input_ = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
net = BatchNormTester(3)
net.set_train()
if not net.training:
raise ValueError('netowrk is not in training mode')
onnx_file = 'batch_norm.onnx'
export(net, input, file_name=onnx_file, file_format='ONNX')
export(net, input_, file_name=onnx_file, file_format='ONNX')
if not net.training:
raise ValueError('netowrk is not in training mode')
@@ -172,6 +172,7 @@ net_cfgs = [
def get_id(cfg):
_ = cfg
return list(map(lambda x: x[0], net_cfgs))

Some files were not shown because too many files have changed in this diff.
