align the default value of some configuration for fleet to that of single cards (#30740)

* update, test=develop
revert-31068-fix_conv3d_windows
Authored by lilong12 4 years ago; committed via GitHub
parent a373aa7645
commit dc8dfba35b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -141,9 +141,9 @@ message DistributedStrategy {
optional bool fuse_all_reduce_ops = 18 [ default = true ];
optional int32 fuse_grad_size_in_MB = 19 [ default = 32 ];
optional float fuse_grad_size_in_TFLOPS = 20 [ default = 50 ];
-  optional bool cudnn_exhaustive_search = 21 [ default = true ];
+  optional bool cudnn_exhaustive_search = 21 [ default = false ];
optional int32 conv_workspace_size_limit = 22 [ default = 512 ];
-  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = true ];
+  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = false ];
optional bool adaptive_localsgd = 24 [ default = false ];
optional bool fp16_allreduce = 25 [ default = false ];
optional bool sharding = 26 [ default = false ];

@@ -118,6 +118,22 @@ class DistributedStrategy(object):
"""
self.strategy = distributed_strategy_pb2.DistributedStrategy()
# Set the default values of the following flags to the ones set by users
key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
if core.globals().is_public(key):
self.strategy.cudnn_batchnorm_spatial_persistent = bool(
core.globals()[key])
key = 'FLAGS_conv_workspace_size_limit'
if core.globals().is_public(key):
self.strategy.conv_workspace_size_limit = int(core.globals()[key])
key = 'FLAGS_cudnn_exhaustive_search'
if core.globals().is_public(key):
self.strategy.cudnn_exhaustive_search = bool(core.globals()[key])
key = 'FLAGS_sync_nccl_allreduce'
if core.globals().is_public(key):
self.strategy.sync_nccl_allreduce = bool(core.globals()[key])
self.__lock_attr = True
def __setattr__(self, key, value):

Loading…
Cancel
Save