clean pylint

Branch: pull/1532/head
Author: jinyaohui, 5 years ago
Parent: 85e686e0b3
Commit: 86d197dfeb

@@ -244,8 +244,8 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     return True
 # pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements,
-# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
+# pylint: disable=inconsistent-return-statements
 # @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str)
 @op_info_register(matmul_cube_dense_left_op_info)
 def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,
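For context, the inconsistent-return-statements check (pylint R1710) that the added disable comment silences fires whenever some paths of a function return a value and others fall off the end and return None implicitly. A minimal, hypothetical illustration (the names are not from the operator code above):

def shape_of(tensor_desc):
    # One branch returns a value...
    if "shape" in tensor_desc:
        return tensor_desc["shape"]
    # ...while this path ends without a return, so the function implicitly
    # returns None and pylint reports R1710. Adding an explicit "return None"
    # here, or disabling the check as above, silences the warning.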

@@ -40,6 +40,7 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
     .get_op_info()
 
+# pylint: disable=inconsistent-return-statements
 @op_info_register(matmul_cube_dense_right_op_info)
 def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
                             kernel_name="matmulcube"):

@@ -31,6 +31,8 @@ from .transformer_model import TransformerModel
 GRADIENT_CLIP_TYPE = 1
 GRADIENT_CLIP_VALUE = 5.0
 
+
+# pylint: disable=consider-using-in
 class ClipGradients(nn.Cell):
     """
     Clip gradients.
@@ -48,11 +50,12 @@ class ClipGradients(nn.Cell):
         self.clip_by_norm = nn.ClipByNorm()
         self.cast = P.Cast()
         self.dtype = P.DType()
 
     def construct(self,
                   grads,
                   clip_type,
                   clip_value):
-        #return grads
+        # return grads
         if clip_type != 0 and clip_type != 1:
             return grads
@@ -83,8 +86,8 @@ class TransformerTrainingLoss(nn.Cell):
         super(TransformerTrainingLoss, self).__init__(auto_prefix=False)
         self.vocab_size = config.vocab_size
         self.onehot = P.OneHot()
-        self.on_value = Tensor(float(1-config.label_smoothing), mstype.float32)
-        self.off_value = Tensor(config.label_smoothing/float(self.vocab_size-1), mstype.float32)
+        self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32)
+        self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32)
         self.reduce_sum = P.ReduceSum()
         self.reduce_mean = P.ReduceMean()
         self.reshape = P.Reshape()
@@ -92,7 +95,7 @@ class TransformerTrainingLoss(nn.Cell):
         self.flatten = P.Flatten()
         self.neg = P.Neg()
         self.cast = P.Cast()
-        self.flat_shape = (config.batch_size*config.seq_length,)
+        self.flat_shape = (config.batch_size * config.seq_length,)
 
     def construct(self, prediction_scores, label_ids, label_weights):
         """Defines the computation performed."""
@@ -217,10 +220,12 @@ class TransformerTrainOneStepCell(nn.Cell):
 grad_scale = C.MultitypeFuncGraph("grad_scale")
 reciprocal = P.Reciprocal()
 
+
 @grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     return grad * F.cast(reciprocal(scale), F.dtype(grad))
 
+
 class TransformerTrainOneStepWithLossScaleCell(nn.Cell):
     """
     Encapsulation class of Transformer network training.
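For readers unfamiliar with MultitypeFuncGraph: registering tensor_grad_scale for the ("Tensor", "Tensor") signature lets grad_scale be mapped over every gradient in a tuple. A sketch of the usual application pattern inside a TrainOneStep-style cell, assumed from the common model-zoo idiom (the call site itself is not part of this hunk):

# Assumed context: self.hyper_map = C.HyperMap() was created in __init__,
# scale is the loss-scale tensor and grads is the tuple of gradients.
grads = self.hyper_map(F.partial(grad_scale, scale), grads)
# Each gradient is multiplied by reciprocal(scale), cast to its own dtype.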

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")
 
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """
@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt)) new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
return new_grad return new_grad
class GetMaskedLMOutput(nn.Cell): class GetMaskedLMOutput(nn.Cell):
""" """
Get masked lm output. Get masked lm output.
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
         self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                     name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,
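The _clip_grad helper in the hunks above dispatches on the clip type (conventionally in this code, 0 clips each element by value and 1 clips by L2 norm). A plain-NumPy reference sketch of that behaviour, for illustration only and not part of the diff:

import numpy as np

def clip_grad_reference(clip_type, clip_value, grad):
    # Mirror of the dispatch above: unknown types pass the gradient through,
    # 0 clips elementwise into [-clip_value, clip_value],
    # 1 rescales so the L2 norm is at most clip_value.
    if clip_type not in (0, 1):
        return grad
    if clip_type == 0:
        return np.clip(grad, -clip_value, clip_value)
    norm = np.linalg.norm(grad)
    return grad if norm <= clip_value else grad * (clip_value / norm)

print(clip_grad_reference(1, 1.0, np.array([3.0, 4.0])))  # [0.6 0.8]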

@@ -15,14 +15,15 @@
 """Test bert submodules."""
-import numpy as np
 import os
-from mindspore import Tensor
-from mindspore import nn, context
+
+import numpy as np
 from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
     BertConfig, BertPreTraining, BertNetworkWithLoss
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
+from mindspore import Tensor
+from mindspore import nn, context
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.compile_forward import pipeline_for_compile_forward_anf_graph_for_case_by_case_config, \
     pipeline_for_compile_forward_ge_graph_for_case_by_case_config
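The reshuffled imports above follow pylint's wrong-import-order convention (C0411): standard-library modules first, then third-party packages, then project and relative imports, with a blank line between groups. A generic sketch of the ordering (the modules shown are only an example):

# standard library
import os

# third-party
import numpy as np

# project / first-party
from mindspore import Tensor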

@@ -15,9 +15,10 @@
 """Component that Check if the function raises the expected Exception."""
-import pytest
 import sys
+
+import pytest
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword

@@ -16,9 +16,10 @@
 """Implementation of Numerical gradients checking."""
 # pylint: disable=missing-docstring
+from typing import Callable, List, Any
+
 import mindspore._c_expression as _c_expression
 import numpy as np
-from typing import Callable, List, Any
 from mindspore import ParameterTuple
 from mindspore import Tensor

@@ -15,9 +15,10 @@
 """Dataset utils."""
-import numpy as np
 import random
+
+import numpy as np
 from mindspore import Tensor

@@ -24,8 +24,7 @@ from mindspore.ops import operations as P
 from mindspore.ops._grad.grad_base import bprop_getters
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
 
-logging.basicConfig(level=logging.DEBUG, format=
-                    '[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
+logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
 logger = logging.getLogger(__name__)
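The change above only joins the wrapped format string onto one line; the configuration itself is unchanged. For reference, a record emitted through this format looks roughly like the comment below (timestamp and path are illustrative):

import logging

logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
# Example output: [DEBUG] 2020-05-22 10:00:00,000 /path/to/test.py:29 starting test
logging.getLogger(__name__).debug("starting test")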

@ -14,9 +14,8 @@
# ============================================================================ # ============================================================================
"""Other utils.""" """Other utils."""
import mindspore._c_expression as _c_expression
import numpy as np import numpy as np
import mindspore._c_expression as _c_expression
from mindspore.common.tensor import Tensor from mindspore.common.tensor import Tensor

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")
 
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """
@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
         new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
     return new_grad
 
+
 class GetMaskedLMOutput(nn.Cell):
     """
     Get masked lm output.
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
         self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                     name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,

@@ -23,35 +23,41 @@ from mindspore.ops import functional as F, composite as C
 import mindspore.context as context
 import pytest
 
 
 class TensorIntAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorIntAutoCast, self).__init__()
         self.i = 2
 
     def construct(self, t):
         z = F.tensor_mul(t, self.i)
         return z
 
 
 class TensorFPAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorFPAutoCast, self).__init__()
         self.f = 1.2
 
     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z
 
 
 class TensorBoolAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorBoolAutoCast, self).__init__()
         self.f = True
 
     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z
 
 
 class TensorAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self, ):
         super(TensorAutoCast, self).__init__()
 
     def construct(self, t1, t2):
         z = F.tensor_mul(t1, t2)
         return z
@@ -210,7 +216,6 @@ def test_tensor_auto_cast():
     with pytest.raises(TypeError):
         net(t_uint64, t_fp64)
     with pytest.raises(TypeError):
         tfp(t_uint16)
     with pytest.raises(TypeError):

@@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype
 from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore import context
+
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -29,6 +30,7 @@ def test_cast_op_attr():
         def __init__(self):
             super(CastNet, self).__init__()
             self.cast = P.Cast()
+
         def construct(self, x, t):
             return self.cast(x, t)
@@ -37,6 +39,7 @@ def test_cast_op_attr():
             super(CastTypeTest, self).__init__()
             self.net = net
             self.cast = P.Cast()
+
         def construct(self, x, y, z):
             cast_op = self.cast
             t1 = cast_op(x, mstype.float32)
@@ -46,6 +49,7 @@ def test_cast_op_attr():
             t4 = cast_net(y, mstype.int32)
             t5 = cast_net(z, mstype.float16)
             return (t1, t2, t3, t4, t5)
+
     net = CastTypeTest(CastNet())
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))

@@ -142,4 +142,6 @@ def test_transpose():
     assert (output[1].asnumpy() == expect1).all()
     assert (output[2].asnumpy() == expect2).all()
     assert (output[3].asnumpy() == expect3).all()
+
+
 test_transpose()

@@ -1043,6 +1043,7 @@ def test_print_tuple_wrapper(tag):
     return fns[tag]
 
+# pylint: disable=unnecessary-semicolon
 def test_constant_duplicate_mul(tag):
     fns = FnDict()
     Mul = Primitive('Mul');
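The disable added above targets pylint W0301 (unnecessary-semicolon), triggered by the trailing semicolons in this test helper, such as Primitive('Mul');. A short illustration of what the checker complains about:

x = 1;  # legal Python, but the trailing semicolon is redundant -> W0301
y = 2   # preferred form, no warning
print(x + y)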

@@ -152,7 +152,7 @@ def test_dict_set_item():
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)
 
 # if the dictionary item does not exist, create a new one
@@ -168,4 +168,4 @@ def test_dict_set_item_create_new():
             return my_dict
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)
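Renaming the unused result to _ is the usual way to tell pylint (unused-variable, W0612) that a return value is deliberately discarded while the call still runs for its side effects. A minimal illustration with a hypothetical stand-in function:

def run_network():
    return 42  # stand-in for net(x) above; only the side effects matter to the test

def test_discard_result():
    _ = run_network()  # result intentionally ignored; no unused-variable warning

test_discard_result()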

@@ -81,31 +81,3 @@ def test_hypermap_func_const():
     net = NetMap()
     assert net() == (8, 12, 16)
-
-"""
-def test_hypermap_func_variable():
-    class NetMap(Cell):
-        def __init__(self):
-            super(NetMap, self).__init__()
-
-        def double(self, x):
-            return 2 * x
-
-        def triple(self, x):
-            return 3 * x
-
-        def square(self, x):
-            return x * x
-
-        def construct(self, x):
-            _list = [self.double, self.triple, self.square]
-            return map(lambda f: f(x), _list)
-
-    x = Tensor(np.ones([3, 2, 3], np.float32))
-    net = NetMap()
-    with pytest.raises(RuntimeError) as ex:
-        net(x)
-    assert "HyperMap don't support Closure with free variable yet" in str(ex.value)
-"""

@@ -133,7 +133,7 @@ def test_list_append_2():
 
 class ListOperate(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(ListOperate, self).__init__()
 
     def construct(self, t, l):
@@ -153,7 +153,7 @@ class ListOperate(nn.Cell):
 
 class InListNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InListNet, self).__init__()
         self.list_ = [1, 2, 3, 4, 5, "ok"]

@@ -53,7 +53,7 @@ class NestTupleGraphNet(nn.Cell):
 
 class InTupleNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InTupleNet, self).__init__()
         self.tuple_ = (1, 2, 3, 4, 5, "ok")

@@ -99,4 +99,4 @@ def test_assignadd_scalar_cast():
     net = AssignAddNet()
     x = Tensor(np.ones([1]).astype(np.int64) * 102)
     # _executor.compile(net, 1)
-    result = net(x)
+    _ = net(x)

@@ -429,9 +429,9 @@ def test_tensor_dtype_np_int64():
 
 def test_tensor_dtype_fp32_to_bool():
     with pytest.raises(RuntimeError):
-        input = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        input = ms.Tensor(input)
-        input_me = ms.Tensor(input, dtype=ms.bool_)
+        input_ = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        input_ = ms.Tensor(input_)
+        _ = ms.Tensor(input_, dtype=ms.bool_)
 
 def test_tensor_operation():
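input is a Python builtin, so rebinding it trips pylint's redefined-builtin check (W0622); appending a trailing underscore, as the renamed test above does, is the conventional fix. A short illustration:

import numpy as np

input_ = np.zeros((2, 3), dtype=np.float32)  # avoids shadowing the builtin input()
print(input_.shape)  # (2, 3)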

@@ -41,10 +41,10 @@ class Func(nn.Cell):
 
     def construct(self, x, y):
         init = self.alloc_status()
-        sum = add(x, y)
+        sum_ = add(x, y)
         product = mul1(x, y)
         flag = self.get_status(init)
-        out = add2(sum, product)
+        out = add2(sum_, product)
         clear = self.clear_status(flag)
         out = F.depend(out, clear)
         return out
@@ -88,7 +88,7 @@ def test_sens():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)
 
 class Net_hyper(nn.Cell):
@@ -119,7 +119,7 @@ def test_hyper_add():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net_hyper()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)
 
 def test_keep_order_io_effect_exception_return_dtype():

@@ -148,9 +148,6 @@ def test_cast():
     _executor.compile(net, x)
 
-"""test grad of PReLU, which cause AddN(generated by grad) fail"""
 class IRBlockZ(nn.Cell):
     def __init__(self, inplanes, planes):
         super(IRBlockZ, self).__init__()

@@ -46,6 +46,7 @@ class MaxNet(nn.Cell):
                  kernel_size,
                  stride=None,
                  padding=0):
+        _ = padding
        super(MaxNet, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size,
                                    stride)
@@ -73,5 +74,5 @@ class Avg1dNet(nn.Cell):
 
 def test_avg1d():
     net = Avg1dNet(6, 1)
-    input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
-    _executor.compile(net, input)
+    input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
+    _executor.compile(net, input_)
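Assigning the otherwise-unused padding argument to _ in the MaxNet hunk above is a lightweight way to satisfy pylint's unused-argument check (W0613) without changing the constructor signature. A sketch of the pattern outside the class (names are illustrative):

def make_pool(kernel_size, stride=None, padding=0):
    _ = padding  # accepted for API compatibility but intentionally ignored
    return (kernel_size, stride)

print(make_pool(3, 2))  # (3, 2)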

@@ -52,19 +52,19 @@ def test_compile_psnr_grayscale():
 
 def test_psnr_max_val_negative():
     max_val = -1
     with pytest.raises(ValueError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_max_val_bool():
     max_val = True
     with pytest.raises(TypeError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_max_val_zero():
     max_val = 0
     with pytest.raises(ValueError):
-        net = PSNRNet(max_val)
+        _ = PSNRNet(max_val)
 
 def test_psnr_different_shape():

Some files were not shown because too many files have changed in this diff.
