Merge pull request #9704 from lgone2000/fixunittest

fix pool and conv3d unittests when Paddle is not built with GPU support
wangkuiyi-patch-2
Tao Luo 7 years ago committed by GitHub
commit 90abd9dfeb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -97,15 +97,18 @@ class TestConv3dOp(OpTest):
         }
         self.outputs = {'Output': output}

+    def testcudnn(self):
+        return core.is_compiled_with_cuda() and self.use_cudnn
+
     def test_check_output(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:
             self.check_output()

     def test_check_grad(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place,
@@ -117,7 +120,7 @@ class TestConv3dOp(OpTest):
                 set(['Input', 'Filter']), 'Output', max_relative_error=0.03)

     def test_check_grad_no_filter(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, ['Input'],
@@ -132,7 +135,7 @@ class TestConv3dOp(OpTest):
                 no_grad_set=set(['Filter']))

     def test_check_grad_no_input(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, ['Filter'],

@@ -109,8 +109,11 @@ class TestPool2d_Op(OpTest):
         self.outputs = {'Out': output}

+    def testcudnn(self):
+        return core.is_compiled_with_cuda() and self.use_cudnn
+
     def test_check_output(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:
@@ -119,7 +122,7 @@ class TestPool2d_Op(OpTest):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        if self.use_cudnn and self.pool_type != "max":
+        if self.testcudnn() and self.pool_type != "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, set(['X']), 'Out', max_relative_error=0.07)

@@ -118,15 +118,18 @@ class TestPool3d_Op(OpTest):
         self.outputs = {'Out': output.astype('float32')}

+    def testcudnn(self):
+        return core.is_compiled_with_cuda() and self.use_cudnn
+
     def test_check_output(self):
-        if self.use_cudnn:
+        if self.testcudnn():
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
         else:
             self.check_output()

     def test_check_grad(self):
-        if self.use_cudnn and self.pool_type != "max":
+        if self.testcudnn() and self.pool_type != "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place, set(['X']), 'Out', max_relative_error=0.07)

Loading…
Cancel
Save