open compare_reduce_and_allreduce test (#15258)

test=develop
revert-15207-remove_op_handle_lock_and_fix_var
chengduo 7 years ago committed by GitHub
parent fd85418329
commit 0e178033d3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -74,7 +74,11 @@ class TestMNIST(TestParallelExecutorBase):
label = np.ones(shape=[32, 1], dtype='int64')
return img, label
def _compare_reduce_and_allreduce(self, model, use_cuda):
def _compare_reduce_and_allreduce(self,
model,
use_cuda,
delta1=1e-6,
delta2=1e-4):
if use_cuda and not core.is_compiled_with_cuda():
return
@ -95,9 +99,9 @@ class TestMNIST(TestParallelExecutorBase):
use_reduce=True)
for loss in zip(all_reduce_first_loss, reduce_first_loss):
self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
self.assertAlmostEqual(loss[0], loss[1], delta=delta1)
for loss in zip(all_reduce_last_loss, reduce_last_loss):
self.assertAlmostEqual(loss[0], loss[1], delta=1e-4)
self.assertAlmostEqual(loss[0], loss[1], delta=delta2)
# simple_fc
def check_simple_fc_convergence(self, use_cuda, use_reduce=False):
@ -174,8 +178,9 @@ class TestMNIST(TestParallelExecutorBase):
self.check_batchnorm_fc_convergence(use_cuda, use_fast_executor)
def test_batchnorm_fc_with_new_strategy(self):
    """Check that reduce-mode and all-reduce-mode training of fc_with_batchnorm
    converge to (almost) the same losses, on both CUDA and CPU."""
    # FIXME(zcd): close this test temporarily.
    # self._compare_reduce_and_allreduce(fc_with_batchnorm, True)
    # NOTE: the computation result of nccl_reduce is non-deterministic,
    # related issue: https://github.com/NVIDIA/nccl/issues/157
    # The CUDA run therefore passes loosened tolerances (delta1=1e-5 for the
    # first-iteration losses, delta2=1e-3 for the last-iteration losses) to
    # absorb that non-determinism; the CPU run keeps the helper's defaults
    # (presumably the tighter 1e-6 / 1e-4 shown in the helper's signature).
    self._compare_reduce_and_allreduce(fc_with_batchnorm, True, 1e-5, 1e-3)
    self._compare_reduce_and_allreduce(fc_with_batchnorm, False)

Loading…
Cancel
Save