use paddle.is_compiled_with_cuda (#27586)

* modify doc
my_2.0rc
Zhang Ting 4 years ago committed by GitHub
parent 2f9cdd9038
commit a4c25b2f34

@@ -257,6 +257,7 @@ from .tensor.stat import numel #DEFINE_ALIAS
from .device import get_cudnn_version
from .device import set_device
from .device import get_device
+from .device import is_compiled_with_cuda #DEFINE_ALIAS
from .device import is_compiled_with_xpu
from .device import XPUPlace
# from .tensor.tensor import Tensor #DEFINE_ALIAS

@@ -18,6 +18,7 @@ import re
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph.parallel import ParallelEnv
+from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
__all__ = [
'get_cudnn_version',
@@ -31,7 +32,7 @@ __all__ = [
# 'cuda_places',
# 'CUDAPinnedPlace',
# 'CUDAPlace',
-    # 'is_compiled_with_cuda'
+    'is_compiled_with_cuda'
]
_cudnn_version = None

@@ -360,13 +360,13 @@ def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
-    Returns (bool): support gpu or not.
+    Returns (bool): `True` if CUDA is currently available, otherwise `False`.
Examples:
.. code-block:: python
-            import paddle.fluid as fluid
-            support_gpu = fluid.is_compiled_with_cuda()
+            import paddle
+            support_gpu = paddle.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
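A minimal usage sketch for the alias exposed by this commit: it selects a device string based on paddle.is_compiled_with_cuda() and passes it to paddle.set_device (which is imported from .device in the same diff above). The 'gpu'/'cpu' device strings and the paddle.ones call are assumptions about the paddle 2.0 API, not part of this change.

    # Sketch: fall back to CPU when the wheel was built without CUDA support.
    import paddle

    device = 'gpu' if paddle.is_compiled_with_cuda() else 'cpu'
    paddle.set_device(device)

    # Tensors created afterwards are placed on the selected device.
    x = paddle.ones([2, 3])
    print(x.place)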
