add some pynative st test_cases in gpu env

pull/9178/head
lvchangquan 4 years ago
parent cf0bca06eb
commit cabefafa13

File diff suppressed because it is too large.

@@ -126,19 +126,19 @@ class LayerNormFactory(OpsFactory):
         return input_grad[0][0].asnumpy(), input_grad[1][1].asnumpy(), input_grad[1][0].asnumpy()

     def forward_cmp(self):
-        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+        context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
         graph_out = self.forward_mindspore_impl()
-        context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
         pynative_out = self.forward_mindspore_impl()
         allclose_nparray(graph_out[0], pynative_out[0], self.loss, self.loss)

     def grad_cmp(self):
-        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+        context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
         graph_grad1, graph_grad2, graph_grad3 = self.grad_mindspore_impl()
-        context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
         pynative_grad1, pynative_grad2, pynative_grad3 = self.grad_mindspore_impl()
         allclose_nparray(graph_grad1, pynative_grad1, self.loss, self.loss)
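
The change here swaps the hard-coded "Ascend" target for context.get_context('device_target'), so forward_cmp and grad_cmp inherit whatever device the calling test configured. A minimal runnable sketch of the idea (ReluNet is a hypothetical stand-in for the factory's network, not part of the commit):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context

class ReluNet(nn.Cell):
    # hypothetical stand-in for the factory's network
    def __init__(self):
        super(ReluNet, self).__init__()
        self.relu = nn.ReLU()

    def construct(self, x):
        return self.relu(x)

def forward_cmp_sketch(x):
    # Read the device back instead of hard-coding "Ascend", so the same
    # graph-vs-pynative comparison runs unchanged on GPU or Ascend.
    device = context.get_context('device_target')
    context.set_context(mode=context.GRAPH_MODE, device_target=device)
    graph_out = ReluNet()(Tensor(x)).asnumpy()
    context.set_context(mode=context.PYNATIVE_MODE, device_target=device)
    pynative_out = ReluNet()(Tensor(x)).asnumpy()
    assert np.allclose(graph_out, pynative_out, rtol=1e-3, atol=1e-3)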
@@ -197,30 +197,52 @@ class ArgMaxWithValueFactory(OpsFactory):
         allclose_nparray(out_numpy[1], out_mindspore[1], self.loss, self.loss)

     def grad_cmp(self):
-        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+        context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
         graph_grad = self.grad_mindspore_impl()
-        context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+        context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
         pynative_grad = self.grad_mindspore_impl()
         allclose_nparray(graph_grad, pynative_grad, self.loss, self.loss)

-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
-def test_layernorm_input():
+def layernorm_input():
     fact = LayerNormFactory(input_shape=(1, 128, 1024), norm_shape=(1024,), gamma_shape=(1024,), beta_shape=(1024,),
                             norm_axis=2, params_axis=2, dtype=np.float16)
     fact.forward_cmp()
     fact.loss = 5e-3
     fact.grad_cmp()

+def argmaxwithvalue_input():
+    fact = ArgMaxWithValueFactory(input_shape=[1024, 1024], axis=-1, keep_dims=False)
+    fact.forward_cmp()
+    fact.grad_cmp()
+
 @pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
-def test_argmaxwithvalue_input():
-    fact = ArgMaxWithValueFactory(input_shape=[1024, 1024], axis=-1, keep_dims=False)
-    fact.forward_cmp()
-    fact.grad_cmp()
+def test_layernorm_input_ascend():
+    context.set_context(device_target="Ascend")
+    layernorm_input()
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_layernorm_input_gpu():
+    context.set_context(device_target="GPU")
+    layernorm_input()
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_argmaxwithvalue_input_ascend():
+    context.set_context(device_target="Ascend")
+    argmaxwithvalue_input()
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_argmaxwithvalue_input_gpu():
+    context.set_context(device_target="GPU")
+    argmaxwithvalue_input()
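
This hunk splits each old Ascend-only test into a shared implementation (layernorm_input, argmaxwithvalue_input) plus thin per-platform entry points that only set the device target. A minimal sketch of that structure, reusing the marker names from the diff; shared_impl and the test names here are hypothetical:

import pytest
from mindspore import context

def shared_impl():
    # device-agnostic test body; reads back whatever the wrapper configured
    assert context.get_context('device_target') in ('Ascend', 'GPU')

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_shared_impl_ascend():
    context.set_context(device_target="Ascend")
    shared_impl()

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_shared_impl_gpu():
    context.set_context(device_target="GPU")
    shared_impl()

With this split, a GPU CI runner can select only its variants via pytest's marker filter, e.g. pytest -m platform_x86_gpu_training.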

@@ -85,7 +85,7 @@ def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
     else:
         assert True

-def mixed_precision_multiple_cells_01():
+def mixed_precision_multiple_cells_temp_01():
     np.random.seed(1)
     x = np.random.randn(1, 3, 28, 28).astype(np.float32)
     net = ReluTanhSoftmax()

@@ -95,7 +95,7 @@ def mixed_precision_multiple_cells_01():
     out_me_relu_01, out_me_tanh_01, out_me_softmax_01 = net(Tensor(x))
     return out_me_relu_01, out_me_tanh_01, out_me_softmax_01

-def mixed_precision_multiple_cells_02():
+def mixed_precision_multiple_cells_temp_02():
     np.random.seed(1)
     x = np.random.randn(1, 3, 28, 28).astype(np.float32)
     net = ReluTanhSoftmax()

@@ -105,7 +105,7 @@ def mixed_precision_multiple_cells_02():
     out_me_relu_02, out_me_tanh_02, out_me_softmax_02 = net(Tensor(x))
     return out_me_relu_02, out_me_tanh_02, out_me_softmax_02

-def mixed_precision_multiple_cells_03():
+def mixed_precision_multiple_cells_temp_03():
     np.random.seed(1)
     x = np.random.randn(1, 3, 28, 28).astype(np.float32)
     net = ReluTanhAdd()
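
These three hunks only add a _temp_ infix to the network-builder helpers. Neither the old nor the new names start with test_, so pytest collection is unaffected; the rename appears simply to free the shorter mixed_precision_multiples_cell_* names for the graph-vs-pynative comparison helpers introduced in the next hunk.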
@@ -115,45 +115,78 @@ def mixed_precision_multiple_cells_03():
     out_me = net(Tensor(x))
     return out_me

-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
-def test_mixed_precision_multiples_cell_01():
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-    graph_relu_01, graph_tanh_01, graph_softmax_01 = mixed_precision_multiple_cells_01()
-    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-    pynative_relu_01, pynative_tanh_01, pynative_softmax_01 = mixed_precision_multiple_cells_01()
+def mixed_precision_multiples_cell_01():
+    context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
+    graph_relu_01, graph_tanh_01, graph_softmax_01 = mixed_precision_multiple_cells_temp_01()
+    context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
+    pynative_relu_01, pynative_tanh_01, pynative_softmax_01 = mixed_precision_multiple_cells_temp_01()
     allclose_nparray(graph_relu_01.asnumpy(), pynative_relu_01.asnumpy(), 0.001, 0.001)
     allclose_nparray(graph_tanh_01.asnumpy(), pynative_tanh_01.asnumpy(), 0.001, 0.001)
     allclose_nparray(graph_softmax_01.asnumpy(), pynative_softmax_01.asnumpy(), 0.001, 0.001)

-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
-def test_mixed_precision_multiples_cell_02():
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-    graph_relu_02, graph_tanh_02, graph_softmax_02 = mixed_precision_multiple_cells_02()
-    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-    pynative_relu_02, pynative_tanh_02, pynative_softmax_02 = mixed_precision_multiple_cells_02()
+def mixed_precision_multiples_cell_02():
+    context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
+    graph_relu_02, graph_tanh_02, graph_softmax_02 = mixed_precision_multiple_cells_temp_02()
+    context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
+    pynative_relu_02, pynative_tanh_02, pynative_softmax_02 = mixed_precision_multiple_cells_temp_02()
     allclose_nparray(graph_relu_02.asnumpy(), pynative_relu_02.asnumpy(), 0.001, 0.001)
     allclose_nparray(graph_tanh_02.asnumpy(), pynative_tanh_02.asnumpy(), 0.001, 0.001)
     allclose_nparray(graph_softmax_02.asnumpy(), pynative_softmax_02.asnumpy(), 0.001, 0.001)

+def mixed_precision_multiples_cell_03():
+    context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
+    graph_output_03 = mixed_precision_multiple_cells_temp_03()
+    context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
+    pynative_output_03 = mixed_precision_multiple_cells_temp_03()
+    allclose_nparray(graph_output_03.asnumpy(), pynative_output_03.asnumpy(), 0.001, 0.001)
+
 @pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
-def test_mixed_precision_multiples_cell_03():
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-    graph_output_03 = mixed_precision_multiple_cells_03()
-    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-    pynative_output_03 = mixed_precision_multiple_cells_03()
-    allclose_nparray(graph_output_03.asnumpy(), pynative_output_03.asnumpy(), 0.001, 0.001)
+def test_mixed_precision_multiples_cell_ascend_01():
+    context.set_context(device_target="Ascend")
+    mixed_precision_multiples_cell_01()
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_mixed_precision_multiples_cell_gpu_01():
+    context.set_context(device_target="GPU")
+    mixed_precision_multiples_cell_01()
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_mixed_precision_multiples_cell_ascend_02():
+    context.set_context(device_target="Ascend")
+    mixed_precision_multiples_cell_02()
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_mixed_precision_multiples_cell_gpu_02():
+    context.set_context(device_target="GPU")
+    mixed_precision_multiples_cell_02()
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_mixed_precision_multiples_cell_ascend_03():
+    context.set_context(device_target="Ascend")
+    mixed_precision_multiples_cell_03()
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_mixed_precision_multiples_cell_gpu_03():
+    context.set_context(device_target="GPU")
+    mixed_precision_multiples_cell_03()
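
For context, the mixed_precision_multiple_cells_temp_* builders exercise per-cell precision, which nn.Cell.to_float enables. The following self-contained sketch reproduces the full compare-both-modes flow on whichever device a wrapper configured; ReluTanhSoftmaxSketch is a hypothetical stand-in for the file's ReluTanhSoftmax, whose definition is not shown in this diff:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common import dtype as mstype

class ReluTanhSoftmaxSketch(nn.Cell):
    # Hypothetical stand-in: three sub-cells so precision can differ per cell.
    def __init__(self):
        super(ReluTanhSoftmaxSketch, self).__init__()
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def construct(self, x):
        r = self.relu(x)
        return r, self.tanh(r), self.softmax(r)

def run_once():
    np.random.seed(1)
    x = np.random.randn(1, 3, 28, 28).astype(np.float32)
    net = ReluTanhSoftmaxSketch()
    net.relu.to_float(mstype.float16)     # run the ReLU cell in fp16
    net.softmax.to_float(mstype.float32)  # keep the softmax cell in fp32
    return net(Tensor(x))

def compare_modes():
    device = context.get_context('device_target')
    context.set_context(mode=context.GRAPH_MODE, device_target=device)
    graph_out = run_once()
    context.set_context(mode=context.PYNATIVE_MODE, device_target=device)
    pynative_out = run_once()
    for g, p in zip(graph_out, pynative_out):
        assert np.allclose(g.asnumpy(), p.asnumpy(), rtol=1e-3, atol=1e-3)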
