!1475 Fix minor Pylint warnings in ME code

Merge pull request !1475 from liuwenhao/master
pull/1475/MERGE
Committed by mindspore-ci-bot via Gitee
commit d8ea87e352
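
Most of the hunks below make the same mechanical fix: a test-function parameter is renamed (x to x_, dout to dout_, grad to grad_, and so on) because it shares a name with a module-level test input, which trips Pylint's redefined-outer-name check (W0621). A minimal sketch of the warning and the rename, with assumed names:

    import numpy as np

    x = np.ones(3)        # module-level test input

    def scale(x):         # W0621: parameter 'x' shadows the outer 'x'
        return x * 2

    def scale_fixed(x_):  # renamed parameter: no shadowing, no warning
        return x_ * 2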

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 x = np.ones([1, 3, 3, 4]).astype(np.float32)

@@ -31,8 +31,8 @@ class Net(nn.Cell):
         # 'normal', [2, 3, 3, 4]), name='dout')
     @ms_function
-    def construct(self, dout):
-        return self.bias_add_grad(dout)
+    def construct(self, dout_):
+        return self.bias_add_grad(dout_)
 dout = np.ones([2, 3, 4, 4]).astype(np.float32)

@@ -34,8 +34,8 @@ class Net(nn.Cell):
         self.get_shape = P.Shape()
     @ms_function
-    def construct(self, x, out):
-        return self.conv2d_grad(out, x, self.get_shape(self.y))
+    def construct(self, x_, out_):
+        return self.conv2d_grad(out_, x_, self.get_shape(self.y))
 x = Tensor(np.array([[[

@@ -29,9 +29,9 @@ class Net(nn.Cell):
         self.mask = P.DropoutGenMask(10, 28)
         self.shape = P.Shape()
-    def construct(self, x, y):
-        shape_x = self.shape(x)
-        return self.mask(shape_x, y)
+    def construct(self, x_, y_):
+        shape_x = self.shape(x_)
+        return self.mask(shape_x, y_)
 x = np.ones([2, 4, 2, 2]).astype(np.int32)

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.equal_count = P.EqualCount()
-    def construct(self, x, y):
-        return self.equal_count(x, y)
+    def construct(self, x_, y_):
+        return self.equal_count(x_, y_)
 x = np.random.randn(32).astype(np.int32)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 x1 = np.random.randn(1, 3).astype(np.float32)

@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore.common.api import ms_function

@@ -63,7 +63,7 @@ def test_net():
     expect = loss_np
     SparseSoftmaxCrossEntropyWithLogits = Net()
     loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels))
-    '''assert'''
+    # assert
     assert np.allclose(expect.flatten(), loss_me.asnumpy().flatten(), 0.01, 0.01)
     print(loss_me.asnumpy().flatten())
     print("-------------------------")

@@ -25,8 +25,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 x = np.random.randn(1, 3, 3, 4).astype(np.float32)

@@ -65,12 +65,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-     [-104, -211, -322]
-     [-102, -144, -248]]]]
-    """
+    # expect output:
+    # [[[[ -60, -142, -265]
+    #  [-104, -211, -322]
+    #  [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)

@@ -64,15 +64,13 @@ def test_conv2d_backprop_input():
     conv2d_input = Net()
     output = conv2d_input()
     print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-     [-15, -6, 17, 17, -2, -11]
-     [-15, -8, 13, 12, 2, -4]
-     [-13, -6, 8, -14, 5, 20]
-     [ -3, -4, -4, -19, 7, 23]
-     [ -3, -2, 0, -14, 3, 16]]]]
-    """
+    # expect output:
+    # [[[[ -5, -4, 5, 12, 0, -8]
+    #  [-15, -6, 17, 17, -2, -11]
+    #  [-15, -8, 13, 12, 2, -4]
+    #  [-13, -6, 8, -14, 5, 20]
+    #  [ -3, -4, -4, -19, 7, 23]
+    #  [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                          [-15, -6, 17, 17, -2, -11],
                          [-15, -8, 13, 12, 2, -4],

@@ -59,7 +59,7 @@ def gelu_backward_cmp(input_shape):
 class MEGeluLargeIn(Cell):
     def __init__(self):
-        super(GELU, self).__init__()
+        super(MEGeluLargeIn, self).__init__()
         self.matmul = P.MatMul()
         self.gelu = P.Gelu()
@@ -79,7 +79,7 @@ class GradLargeIn(Cell):
 def gelu_backward_me_large_in_impl(x1, x2, output_grad):
-    n = GradLargeIn()
+    n = GELU()
     grad_with_sense = GradLargeIn(n)
     grad_with_sense.set_train()
     input_grad = grad_with_sense(x1, x2, output_grad)
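
Unlike the pure renames, this hunk fixes real bugs: super() was called with the wrong class, which Pylint surfaces as bad-super-call (E1003), and the helper instantiated an argument-less GradLargeIn instead of a forward net to wrap. The corrected shape of the class, restated from the diff (other methods elided):

    class MEGeluLargeIn(Cell):
        def __init__(self):
            # super() must name the enclosing class, not a different one
            super(MEGeluLargeIn, self).__init__()
            self.matmul = P.MatMul()
            self.gelu = P.Gelu()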

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less = P.Less()
     @ms_function
-    def construct(self, x1, x2):
-        return self.less(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less(x1_, x2_)
 x1 = np.random.randn(3, 4).astype(np.float16)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less_equal = P.LessEqual()
     @ms_function
-    def construct(self, x1, x2):
-        return self.less_equal(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less_equal(x1_, x2_)
 x1 = np.random.randn(3, 4).astype(np.float16)

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_and = P.LogicalAnd()
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_and(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_and(x1_, x2_)
 x1 = [True, True, False, False, True, True, False, False]

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_not = P.LogicalNot()
     @ms_function
-    def construct(self, x1):
-        return self.logical_not(x1)
+    def construct(self, x):
+        return self.logical_not(x)
 x1 = [True, True, False, False, True, True, False, False]

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_or = P.LogicalOr()
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_or(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_or(x1_, x2_)
 x1 = [True, True, False, False, True, True, False, False]

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 x1 = np.random.randn(1, 3).astype(np.float32)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul(transpose_b=True)
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 x1 = np.random.randn(10, 1).astype(np.float32)

@@ -44,15 +44,15 @@ class GradWrap(Cell):
         return gout
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
     inputB_me = inputB_np
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
-    if grad is None:
-        grad = np.random.randn(2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(2).astype(np.float32)
     print("----inputA---")
     print(inputA_np)
     print("----inputB---")
@@ -60,7 +60,7 @@ def gen_data(inputA_np, inputB_np, grad=None):
     net_me = GradWrap(MaxNetMe())
     net_me.set_train()
-    output = net_me(inputA_me, inputB_me, Tensor(grad))
+    output = net_me(inputA_me, inputB_me, Tensor(grad_))
     print("---me---")
     print(output[0].asnumpy())
     print(output[1].asnumpy())

@@ -44,7 +44,7 @@ class GradWrap(Cell):
         return gout
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
@@ -53,12 +53,12 @@ def gen_data(inputA_np, inputB_np, grad=None):
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
-    if grad is None:
-        grad = np.random.randn(1, 3, 2, 2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(1, 3, 2, 2).astype(np.float32)
     print(inputA_np)
     print(inputB_np)
-    print(grad)
+    print(grad_)
     net_me = GradWrap(MinNetMe())
     net_me.set_train()

@@ -31,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network
     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, inputValue, output_grad):
+        return self.grad(self.network)(inputValue, output_grad)
 class Net(nn.Cell):
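
Here the offending name is not an outer variable but Python's built-in input(), which Pylint flags as redefined-builtin (W0622); any name that does not collide with a builtin clears it. Sketch with assumed names:

    def forward(input):              # W0622: redefines built-in 'input'
        return input * 2

    def forward_fixed(input_value):  # hypothetical rename; warning-free
        return input_value * 2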

@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import sys
+import numpy as np
 import mindspore.context as context
 import mindspore.dataset as ds
@@ -31,8 +31,8 @@ SCHEMA_DIR = "{0}/resnet_all_datasetSchema.json".format(data_path)
 def test_me_de_train_dataset():
     data_list = ["{0}/train-00001-of-01024.data".format(data_path)]
-    data_set = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
-                                  columns_list=["image/encoded", "image/class/label"])
+    data_set_new = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
+                                      columns_list=["image/encoded", "image/class/label"])
     resize_height = 224
     resize_width = 224
@@ -42,21 +42,21 @@ def test_me_de_train_dataset():
     # define map operations
     decode_op = vision.Decode()
-    resize_op = vision.Resize(resize_height, resize_width,
+    resize_op = vision.Resize((resize_height, resize_width),
                               Inter.LINEAR)  # Bilinear as default
     rescale_op = vision.Rescale(rescale, shift)
     # apply map operations on images
-    data_set = data_set.map(input_columns="image/encoded", operations=decode_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=resize_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=rescale_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=decode_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=rescale_op)
     hwc2chw_op = vision.HWC2CHW()
-    data_set = data_set.map(input_columns="image/encoded", operations=hwc2chw_op)
-    data_set = data_set.repeat(1)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=hwc2chw_op)
+    data_set_new = data_set_new.repeat(1)
     # apply batch operations
-    batch_size = 32
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-    return data_set
+    batch_size_new = 32
+    data_set_new = data_set_new.batch(batch_size_new, drop_remainder=True)
+    return data_set_new
 def convert_type(shapes, types):
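
Buried in the data_set_new renames above is an API fix: mindspore.dataset's Resize takes the target size as a single first argument (an int or an (height, width) sequence), so the old call was passing the width where the interpolation mode belongs. The corrected call, restated from the diff:

    resize_op = vision.Resize((resize_height, resize_width), Inter.LINEAR)
    data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)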

@@ -14,10 +14,10 @@
 # ============================================================================
 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
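
Both import hunks are wrong-import-order fixes (C0411): Pylint expects standard-library imports first, then third-party packages, with each group kept together. The expected grouping, sketched:

    import sys                            # standard library first

    import numpy as np                    # third-party packages next, grouped
    import pytest

    import mindspore.context as context   # mindspore modules follow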

@ -68,12 +68,10 @@ def test_conv2d_backprop_filter():
conv2d_filter = Net4()
output = conv2d_filter()
print("================================")
"""
expect output:
[[[[ -60, -142, -265]
[-104, -211, -322]
[-102, -144, -248]]]]
"""
# expect output:
# [[[[ -60, -142, -265]
# [-104, -211, -322]
# [-102, -144, -248]]]]
expect = np.array([[[[-60, -142, -265],
[-104, -211, -322],
[-102, -144, -248]]]]).astype(np.float32)

Some files are not shown because too many files changed in this diff.
