!1277 Fix some minor Pylint issues in ME code

Merge pull request !1277 from liuwenhao/master
pull/1277/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 831ceba6eb

@ -17,9 +17,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@ -47,4 +44,4 @@ def test_net():
expect = 3.0
add = Net()
output = add(x, y)
assert (output == expect)
assert output == expect

@ -17,7 +17,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@ -37,7 +36,7 @@ def test_net_bool():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_int8():
@ -45,7 +44,7 @@ def test_net_int8():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_uint8():
@ -53,7 +52,7 @@ def test_net_uint8():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_int16():
@ -61,7 +60,7 @@ def test_net_int16():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_uint16():
@ -69,7 +68,7 @@ def test_net_uint16():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_int32():
@ -77,7 +76,7 @@ def test_net_int32():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_uint32():
@ -85,7 +84,7 @@ def test_net_uint32():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_int64():
@ -93,7 +92,7 @@ def test_net_int64():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_uint64():
@ -101,7 +100,7 @@ def test_net_uint64():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_float16():
@ -109,7 +108,7 @@ def test_net_float16():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_float32():
@ -117,7 +116,7 @@ def test_net_float32():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))
def test_net_float64():
@ -125,4 +124,4 @@ def test_net_float64():
net = Net()
output = net(Tensor(x), -1)
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
assert np.all(output.asnumpy() == np.expand_dims(x, -1))

@ -36,7 +36,7 @@ def test_net_int8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_uint8():
@ -44,7 +44,7 @@ def test_net_uint8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_int16():
@ -52,7 +52,7 @@ def test_net_int16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_uint16():
@ -60,7 +60,7 @@ def test_net_uint16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_int32():
@ -68,7 +68,7 @@ def test_net_int32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_uint32():
@ -76,7 +76,7 @@ def test_net_uint32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_int64():
@ -84,7 +84,7 @@ def test_net_int64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_uint64():
@ -92,7 +92,7 @@ def test_net_uint64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_float16():
@ -100,7 +100,7 @@ def test_net_float16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())
def test_net_float32():
@ -108,4 +108,4 @@ def test_net_float32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.flatten()))
assert np.all(output.asnumpy() == x.flatten())

@ -17,7 +17,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@ -37,7 +36,7 @@ def test_net_bool():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_int8():
@ -45,7 +44,7 @@ def test_net_int8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_uint8():
@ -53,7 +52,7 @@ def test_net_uint8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_int16():
@ -61,7 +60,7 @@ def test_net_int16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_uint16():
@ -69,7 +68,7 @@ def test_net_uint16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_int32():
@ -77,7 +76,7 @@ def test_net_int32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_uint32():
@ -85,7 +84,7 @@ def test_net_uint32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_int64():
@ -93,7 +92,7 @@ def test_net_int64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_uint64():
@ -101,7 +100,7 @@ def test_net_uint64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_float16():
@ -109,7 +108,7 @@ def test_net_float16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_float32():
@ -117,7 +116,7 @@ def test_net_float32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))
def test_net_float64():
@ -125,4 +124,4 @@ def test_net_float64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.isfinite(x)))
assert np.all(output.asnumpy() == np.isfinite(x))

@ -17,7 +17,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@ -37,7 +36,7 @@ def test_net_bool():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_int8():
@ -45,7 +44,7 @@ def test_net_int8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_uint8():
@ -53,7 +52,7 @@ def test_net_uint8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_int16():
@ -61,7 +60,7 @@ def test_net_int16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_uint16():
@ -69,7 +68,7 @@ def test_net_uint16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_int32():
@ -77,7 +76,7 @@ def test_net_int32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_uint32():
@ -85,7 +84,7 @@ def test_net_uint32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_int64():
@ -93,7 +92,7 @@ def test_net_int64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_uint64():
@ -101,7 +100,7 @@ def test_net_uint64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_float16():
@ -109,7 +108,7 @@ def test_net_float16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_float32():
@ -117,7 +116,7 @@ def test_net_float32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
def test_net_float64():
@ -125,4 +124,4 @@ def test_net_float64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))

@ -36,7 +36,7 @@ def test_net_bool():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_int8():
@ -44,7 +44,7 @@ def test_net_int8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_uint8():
@ -52,7 +52,7 @@ def test_net_uint8():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_int16():
@ -60,7 +60,7 @@ def test_net_int16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_uint16():
@ -68,7 +68,7 @@ def test_net_uint16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_int32():
@ -76,7 +76,7 @@ def test_net_int32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_uint32():
@ -84,7 +84,7 @@ def test_net_uint32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_int64():
@ -92,7 +92,7 @@ def test_net_int64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_uint64():
@ -100,7 +100,7 @@ def test_net_uint64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_float16():
@ -108,7 +108,7 @@ def test_net_float16():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_float32():
@ -116,7 +116,7 @@ def test_net_float32():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())
def test_net_float64():
@ -124,4 +124,4 @@ def test_net_float64():
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert (np.all(output.asnumpy() == x.squeeze()))
assert np.all(output.asnumpy() == x.squeeze())

@ -12,12 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
@ -34,11 +31,11 @@ class Net(nn.Cell):
self.accumulation = Parameter(initializer(
'normal', [2, 3, 3, 4]), name='accumulation')
self.learning_rate = Parameter(initializer(
'normal', [1, ]), name='learning_rate')
'normal', [1,]), name='learning_rate')
self.gradient = Parameter(initializer(
'normal', [2, 3, 3, 4]), name='gradient')
self.momentum = Parameter(initializer(
'normal', [1, ]), name='momentum')
'normal', [1,]), name='momentum')
def construct(self):
return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum)

@ -18,9 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
context.set_context(device_target="Ascend")

@ -16,11 +16,7 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
context.set_context(device_target="Ascend")

@ -18,7 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G

@ -33,8 +33,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -18,9 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target="Ascend")

@ -18,9 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
context.set_context(device_target="Ascend")
@ -33,8 +30,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -12,11 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P

@ -17,7 +17,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P

@ -34,8 +34,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -39,8 +39,8 @@ def test_image_gradients():
expected_dx = np.array([[[[1, 0], [1, 0]]]]).astype(np.int32)
net = Net()
dy, dx = net(image)
assert np.any(dx.asnumpy() - expected_dx) == False
assert np.any(dy.asnumpy() - expected_dy) == False
assert not np.any(dx.asnumpy() - expected_dx)
assert not np.any(dy.asnumpy() - expected_dy)
def test_image_gradients_multi_channel_depth():
@ -61,5 +61,5 @@ def test_image_gradients_multi_channel_depth():
net = Net()
dy, dx = net(image)
assert np.any(dx.asnumpy() - expected_dx.asnumpy()) == False
assert np.any(dy.asnumpy() - expected_dy.asnumpy()) == False
assert not np.any(dx.asnumpy() - expected_dx.asnumpy())
assert not np.any(dy.asnumpy() - expected_dy.asnumpy())

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target="Ascend")

@ -31,8 +31,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -16,7 +16,6 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
@ -43,7 +42,6 @@ class Net(nn.Cell):
def test_net():
x = np.random.randn(1, 64, 112, 112).astype(np.float32)
maxpool = Net()
output = maxpool()
print("***********output output*********")

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
@ -33,8 +31,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target="Ascend")

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
@ -33,8 +31,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target="Ascend")

@ -18,8 +18,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
@ -33,8 +31,8 @@ class Grad(nn.Cell):
self.network = network
@ms_function
def construct(self, input, output_grad):
return self.grad(self.network)(input, output_grad)
def construct(self, input_, output_grad):
return self.grad(self.network)(input_, output_grad)
class Net(nn.Cell):

Some files were not shown because too many files have changed in this diff. Show More

Loading…
Cancel
Save