!6338 Fixing pylint for operators

Merge pull request !6338 from zhangzheng/master
pull/6338/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 8785eef1ee

@@ -678,16 +678,13 @@ def get_bprop_top_kv2(self):
    def bprop(input_x, k, out, dout):
        # (n_1, n_2, ..., n_p), in_lastdim = n_p
        in_shape = shape_op(input_x)
        in_lastdim = in_shape[-1]
        # (n_1, ..., n_(p-1), k), ind_lastdim = k
        indices = out[1]
        ind_shape = shape_op(indices)
        ind_lastdim = ind_shape[-1]
        # (n_1*n_2*...*n_(p-1), k), outerdim = n_1*n_2*...*n_(p-1)
        ind_2d = reshape_op(indices, (-1, ind_lastdim))
        outerdim = shape_op(ind_2d)[0]
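
The reshape flattens every leading dimension into one so the gradient can be scattered row by row. A quick NumPy sketch of the same shape bookkeeping (values are illustrative; np.argsort stands in for TopKV2's indices output):

    import numpy as np

    x = np.random.rand(2, 3, 8)                 # (n_1, n_2, n_p), in_lastdim = 8
    k = 4
    indices = np.argsort(-x, axis=-1)[..., :k]  # top-k indices, shape (2, 3, 4)
    ind_lastdim = indices.shape[-1]             # k = 4
    ind_2d = indices.reshape(-1, ind_lastdim)   # (n_1*n_2, k) = (6, 4)
    outerdim = ind_2d.shape[0]                  # 6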

@@ -71,7 +71,6 @@ def correction_mul(x, batch_std, running_std, y, channel, kernel_name="correctio
    if not inp_dtype in check_list:
        raise RuntimeError("Dtype of input only support float16, float32")
    # shape = util.shape_refine(shape)
    x_t = tvm.placeholder(shape, name="x", dtype=inp_dtype)
    shape_c = [1] * len(shape)
    shape_c[channel] = batch_std.get("ori_shape")[0]
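
The shape_c lines build a per-channel broadcast shape: all ones except the channel axis, which takes the length of batch_std. A standalone sketch with assumed sizes (the concrete numbers are illustrative):

    # Assumed: 4-D input with the channel axis at position 1, 16 channels.
    shape = (32, 16, 5, 5)
    channel = 1
    shape_c = [1] * len(shape)   # [1, 1, 1, 1]
    shape_c[channel] = 16
    print(shape_c)               # [1, 16, 1, 1], broadcastable against shape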

@@ -60,7 +60,6 @@ def fake_quant_perchannel_compute(x, min_val, max_val, y, quant_min, quant_max,
    quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
    quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)
    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
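
The vsub/vdiv chain is the standard fake-quant "nudge" arithmetic: scale = (max - min) / (quant_max - quant_min) and zp_from_min = quant_min - min / scale. A scalar walk-through with assumed 8-bit values:

    quant_min, quant_max = 0.0, 255.0
    min_val, max_val = -1.0, 3.0                           # assumed float range
    scale = (max_val - min_val) / (quant_max - quant_min)  # 4 / 255 ≈ 0.0157
    zp_from_min = quant_min - min_val / scale              # 0 - (-63.75) = 63.75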

@@ -87,7 +87,6 @@ def fake_quant_perchannel_grad_compute(dout, x, min_val, max_val, quant_min, qua
    quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype)
    quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype)
    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))

@@ -61,7 +61,6 @@ def fake_quant_per_layer_compute(x, min_val, max_val, y, quant_min, quant_max, s
    max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
    min_val = te.lang.cce.vmuls(max_val, -1.)
    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))
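
The two vmuls/vmax lines enforce a symmetric quantization range before the nudge: whichever of |min| and max is larger wins, and min becomes its negation. A scalar sketch with assumed values:

    min_val, max_val = -1.0, 3.0
    max_val = max(min_val * -1., max_val)  # max(1.0, 3.0) = 3.0
    min_val = max_val * -1.                # -3.0: range is now symmetric about 0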

@@ -92,7 +92,6 @@ def fake_quant_per_layer_grad_compute(dout, x, min_val, max_val, quant_min, quan
    max_val = te.lang.cce.vmax(te.lang.cce.vmuls(min_val, -1.), max_val)
    min_val = te.lang.cce.vmuls(max_val, -1.)
    # CalNudge(NudgeMinMax)
    scale = te.lang.cce.vdiv(te.lang.cce.vsub(
        max_val, min_val), te.lang.cce.vsub(quant_max, quant_min))
    zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale))

@@ -91,20 +91,16 @@ def check_tensor_setitem_index(index, element_type=None):
    """Checks tuple index type of tensor assignment."""
    if index is None:
        raise IndexError("Tensor's index cannot be None.")
    # eg. Tensor[Slice] = u
    if isinstance(index, slice):
        return True
    # eg. Tensor[tuple] = u
    if isinstance(index, tuple):
        if not index:
            raise IndexError("Tensor's index cannot be empty.")
        # eg. Tensor[tuple(Slice,...)] = u
        for item in index:
            if not isinstance(item, (slice, type(...), int)):
                raise IndexError(
                    "Index of type '{}' is not supported yet.".format(type(item)))
        return True
    # eg. Tensor[Tensor[dtype=bool]] = u
    if isinstance(index, mstype.tensor_type):
        if element_type is None or element_type != mstype.bool_:
            raise TypeError(
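
The same accept/reject rules in a standalone plain-Python sketch (check is a hypothetical stand-in using builtin types only, not MindSpore's dispatcher):

    def check(index):
        # slice indexes and tuples of slice/Ellipsis/int pass; others raise.
        if index is None:
            raise IndexError("Tensor's index cannot be None.")
        if isinstance(index, slice):
            return True                  # Tensor[0:2] = u
        if isinstance(index, tuple):
            if not index:
                raise IndexError("Tensor's index cannot be empty.")
            for item in index:
                if not isinstance(item, (slice, type(...), int)):
                    raise IndexError(
                        "Index of type '{}' is not supported yet.".format(type(item)))
            return True                  # Tensor[0, ..., 1:3] = u
        raise TypeError("Unsupported index type: {}".format(type(index)))

    print(check(slice(0, 2)))            # True
    print(check((0, ..., slice(1, 3))))  # True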

@@ -305,23 +305,19 @@ def _tensor_setitem_with_slice_v1(data, input_slice, value):
@setitem.register("Tensor", "Number", "Number")
def _tensor_setitem_with_int_v1(data, index, value):
    """Syntax: A[1] = 3"""
    return compile_utils.tensor_setitem_by_number_with_number(data, index, value)

@setitem.register("Tensor", "Number", "Tensor")
def _tensor_setitem_with_int_v2(data, index, value):
    """Syntax: A[1] = Tensor"""
    return compile_utils.tensor_setitem_by_number_with_tensor(data, index, value)

@setitem.register("Tensor", "Ellipsis", "Number")
def _tensor_setitem_with_ellipsis_v1(data, index, value):
    """Syntax: A[...] = number."""
    return compile_utils.tensor_setitem_by_ellipsis_with_number(data, index, value)

@setitem.register("Tensor", "Ellipsis", "Tensor")
def _tensor_setitem_with_ellipsis_v2(data, index, value):
    """Syntax: A[...] = Tensor."""
    return compile_utils.tensor_setitem_by_ellipsis_with_tensor(data, index, value)
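
All four handlers share one pattern: setitem dispatches on the (data, index, value) type triple and forwards to a compile_utils helper. A minimal sketch of that dispatch idea with a plain dict (register and dispatch are hypothetical helpers, not MindSpore's Registry):

    _HANDLERS = {}

    def register(data_t, index_t, value_t):
        def deco(fn):
            _HANDLERS[(data_t, index_t, value_t)] = fn
            return fn
        return deco

    @register("Tensor", "Number", "Number")
    def setitem_number_with_number(data, index, value):
        return "A[1] = 3 path"

    def dispatch(types, *args):
        return _HANDLERS[types](*args)

    print(dispatch(("Tensor", "Number", "Number"), None, 1, 3))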

@@ -320,8 +320,6 @@ class CusMatMulCube(PrimitiveWithInfer):
        from mindspore.ops._op_impl._custom_op.matmul_cube_impl import CusMatMulCube

    def infer_shape(self, data1_shape, data2_shape):
        # shape = [1, data1_shape[1], data2_shape[2], 16, 16]
        # return shape
        if self.transpose_a:
            k1, m = data1_shape
        else:
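
infer_shape reads (m, k) off each operand, honoring the transpose flags, and the output is (m, n). A self-contained sketch of that rule (matmul_infer_shape is a hypothetical helper mirroring the branch above):

    def matmul_infer_shape(a_shape, b_shape, transpose_a=False, transpose_b=False):
        if transpose_a:
            k1, m = a_shape        # A stored as (k, m)
        else:
            m, k1 = a_shape        # A stored as (m, k)
        if transpose_b:
            n, k2 = b_shape
        else:
            k2, n = b_shape
        assert k1 == k2, "inner dimensions must match"
        return (m, n)

    print(matmul_infer_shape((16, 32), (32, 8)))                    # (16, 8)
    print(matmul_infer_shape((32, 16), (32, 8), transpose_a=True))  # (16, 8)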

@@ -2068,7 +2068,6 @@ def _compute_slicing_length(begin, end, stride, x_shape, i):
        if 0 <= begin < x_dim:
            begin += -x_dim
        if begin >= x_dim:
            # When slicing backward, if begin >= x_dim, set begin = -1, which means start from the last element.
            begin = -1
        if 0 <= end < x_dim:
            end += -x_dim
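
A worked example of the normalization above, assuming backward slicing (stride < 0) over a dimension of length 5:

    x_dim = 5
    begin = 2
    if 0 <= begin < x_dim:
        begin += -x_dim   # 2 -> -3: the same element, counted from the end
    begin2 = 7            # past the end
    if begin2 >= x_dim:
        begin2 = -1       # clamp: start from the last element
    print(begin, begin2)  # -3 -1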

@@ -745,7 +745,6 @@ class BNTrainingUpdate(PrimitiveWithInfer):
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')
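
factor is constrained to [0, 1] because it acts as the momentum of the running-statistics update, while epsilon is constrained to (0, 1] since it only guards the variance denominator. A hedged sketch of the assumed momentum-style blend (the fused kernel computes this on-device; the exact weighting here is an assumption, not taken from the patch):

    def update_running_stat(running, batch, factor=0.1):
        # Assumed semantics: factor weights the fresh batch statistic.
        return (1.0 - factor) * running + factor * batch

    print(update_running_stat(0.0, 1.0))  # 0.1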
