@@ -32,9 +32,7 @@ from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Dense
from mindspore.common.initializer import initializer
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
from mindspore import amp


def random_normal_init(shape, mean=0.0, stddev=0.01, seed=None):
@@ -326,6 +324,7 @@ def resnet50(num_classes):
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_trainTensor(num_classes=10, epoch=8, batch_size=1):
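    # Trains ResNet-50 in the default full-precision path on constant dummy inputs
    # and checks that the training loss drops below 1 within a few steps.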
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = resnet50(num_classes)
    lr = 0.1
    momentum = 0.9
@@ -341,3 +340,26 @@ def test_trainTensor(num_classes=10, epoch=8, batch_size=1):
        loss = train_network(data, label)
        losses.append(loss)
    assert(losses[-1].asnumpy() < 1)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_trainTensor_amp(num_classes=10, epoch=18, batch_size=16):
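    # Same dummy-data training loop as test_trainTensor, but the network is wrapped
    # with mixed precision (amp) at level O2 instead of a plain TrainOneStepCell.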
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", enable_mem_reuse=False,
                        enable_dynamic_memory=False)
    net = resnet50(num_classes)
    lr = 0.1
    momentum = 0.9
    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
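    # level="O2" casts the network to float16 (keeping batch norm in float32) and
    # enables dynamic loss scaling; the returned cell trains net with criterion
    # and optimizer under mixed precision.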
    train_network = amp.build_train_network(net, optimizer, criterion, level="O2")
    train_network.set_train()
    losses = []
    for i in range(0, epoch):
        data = Tensor(np.ones([batch_size, 3, 224, 224]).astype(np.float32) * 0.01)
        label = Tensor(np.ones([batch_size]).astype(np.int32))
        loss = train_network(data, label)
        losses.append(loss)
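    # With loss scaling enabled, each training step returns a tuple of
    # (loss, overflow flag, current loss scale): the final loss should have
    # dropped below 1, the last step should not have overflowed, and the loss
    # scale should still be greater than 1.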
    assert(losses[-1][0].asnumpy() < 1)
    assert(losses[-1][1].asnumpy() == False)
    assert(losses[-1][2].asnumpy() > 1)