support dp run single card (#29358)

ShenLiang committed 4 years ago (via GitHub)
parent 1decf4ada6
commit 4064354a01

@@ -179,6 +179,9 @@ class Fleet(object):
                 fleet.init(strategy=strategy)

         """
+        if strategy is None:
+            strategy = DistributedStrategy()
+        self._user_defined_strategy = copy.deepcopy(strategy)

         if role_maker is None:
             if isinstance(is_collective, bool):
@@ -220,10 +223,6 @@ class Fleet(object):
             else:
                 paddle.distributed.init_parallel_env()

-        if strategy is None:
-            strategy = DistributedStrategy()
-        self._user_defined_strategy = copy.deepcopy(strategy)
-
     def is_first_worker(self):
         """
         Check whether the node is the first instance of worker.
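These two hunks move the strategy bookkeeping to the top of `Fleet.init`, so `self._user_defined_strategy` is populated before any role-maker or parallel-env branching runs. A minimal usage sketch of the call pattern this enables (not part of the diff; assumes the Paddle 2.0-era fleet API and an illustrative `amp` setting):

```python
# Sketch only: illustrates the call order Fleet.init now supports.
# Because the strategy is deep-copied at the top of init(), the
# user-defined settings are recorded even when the launch ends up
# running on a single card.
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()
strategy.amp = True  # illustrative user setting; survives regardless of card count

# Behaves the same whether started via `fleetrun` on multiple cards
# or run directly as a plain single-card Python process.
fleet.init(is_collective=True, strategy=strategy)
```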

@@ -395,11 +395,10 @@ class DataParallel(layers.Layer):
                 1024)
             self.init_reducer()
         else:
-            warnings.warn(
-                "nranks is less than 2, "
-                "maybe you need to check the current system environment."
-                " Need to use spawn or fleetrun to "
-                "start distributed programs.")
+            warnings.warn("The program will run in single-card mode. Please "
+                          "check: 1. whether spawn or fleetrun was used to "
+                          "start the program; 2. whether it is a multi-card "
+                          "program; 3. whether the environment is multi-card.")

     def init_reducer(self):
         layers_param = []
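This hunk replaces the old "nranks is less than 2" warning: instead of implying a misconfiguration, `DataParallel` now announces that it is falling back to single-card execution and skips `init_reducer`. A minimal sketch of what the fallback enables (the layer and shapes are illustrative, not from the diff):

```python
# Sketch: wrapping a layer in paddle.DataParallel on a single visible
# card no longer requires a distributed launcher; the constructor
# warns and the wrapped layer runs as an ordinary single-card model.
import paddle
import paddle.nn as nn

model = nn.Linear(10, 1)
dp_model = paddle.DataParallel(model)  # nranks < 2 -> warn, single-card path

x = paddle.randn([4, 10])
out = dp_model(x)   # forwards directly to the inner Linear layer
print(out.shape)    # [4, 1]
```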
