rename mirror_mean to gradients_mean

pull/5700/head
yao_yf 4 years ago
parent bc4c5afc1a
commit d4cfe55c04
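
For readers of this diff, the user-visible effect of the rename is the keyword accepted by set_auto_parallel_context (and the matching key read back by get_auto_parallel_context). A minimal before/after sketch, assuming a MindSpore build from around this commit:

import mindspore.context as context
from mindspore.context import ParallelMode

# Before this commit the flag was spelled mirror_mean:
# context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
#                                   mirror_mean=True)

# After this commit the same flag is spelled gradients_mean:
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                  gradients_mean=True)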

@@ -45,7 +45,7 @@ std::shared_ptr<ParallelContext> ParallelContext::GetInstance() {
ParallelContext::ParallelContext() { Reset(); }
void ParallelContext::Reset() {
- mirror_mean_ = false;
+ gradients_mean_ = false;
full_batch_ = false;
gradient_fp32_sync_ = true;
loss_repeated_mean_ = true;
@@ -74,7 +74,7 @@ void ParallelContext::set_global_rank(int32_t global_rank) {
global_rank_is_set_ = true;
}
- void ParallelContext::set_mirror_mean(bool mirror_mean) { mirror_mean_ = mirror_mean; }
+ void ParallelContext::set_gradients_mean(bool gradients_mean) { gradients_mean_ = gradients_mean; }
void ParallelContext::set_full_batch(bool full_batch) { full_batch_ = full_batch; }

@@ -52,8 +52,8 @@ class ParallelContext {
static std::shared_ptr<ParallelContext> GetInstance();
- void set_mirror_mean(bool mirror_mean);
- bool mirror_mean() const { return mirror_mean_; }
+ void set_gradients_mean(bool gradients_mean);
+ bool gradients_mean() const { return gradients_mean_; }
void set_full_batch(bool full_batch);
bool full_batch() const { return full_batch_; }
@@ -107,7 +107,7 @@ class ParallelContext {
private:
ParallelContext();
static std::shared_ptr<ParallelContext> inst_context_;
- bool mirror_mean_;
+ bool gradients_mean_;
bool full_batch_;
bool gradient_fp32_sync_;
bool loss_repeated_mean_;

@@ -251,7 +251,7 @@ OperatorVector CreateMirrorOps(const std::string &group_name, size_t dev_num) {
MS_LOG(EXCEPTION) << "Invalid dev num: " << dev_num;
}
OperatorVector op_for_weight;
- bool mean_flag = ParallelContext::GetInstance()->mirror_mean();
+ bool mean_flag = ParallelContext::GetInstance()->gradients_mean();
OperatorName operator_name = MIRROR_OPERATOR;
ValuePtr attr0_value = MakeValue(group_name);

@@ -2488,7 +2488,7 @@ Status ParallelInit() {
}
MS_LOG(INFO) << "The parallel context: dev num: " << device_num << ", global rank: " << global_rank
- << ", backend: " << backend << ", mirror_mean: " << ParallelContext::GetInstance()->mirror_mean()
+ << ", backend: " << backend << ", gradients_mean: " << ParallelContext::GetInstance()->gradients_mean()
<< ", gradient_fp32_sync: " << ParallelContext::GetInstance()->gradient_fp32_sync();
return SUCCESS;
}

@@ -113,8 +113,8 @@ PYBIND11_MODULE(_c_expression, m) {
.def("get_global_rank", &ParallelContext::global_rank, "Get global rank.")
.def("set_global_rank", &ParallelContext::set_global_rank, "Set global rank.")
.def("get_global_rank_is_set", &ParallelContext::global_rank_is_set, "Get global rank is set.")
- .def("get_mirror_mean", &ParallelContext::mirror_mean, "Get mirror mean.")
- .def("set_mirror_mean", &ParallelContext::set_mirror_mean, "Set mirror mean.")
+ .def("get_gradients_mean", &ParallelContext::gradients_mean, "Get mirror mean.")
+ .def("set_gradients_mean", &ParallelContext::set_gradients_mean, "Set mirror mean.")
.def("get_gradient_fp32_sync", &ParallelContext::gradient_fp32_sync, "Get cast before mirror.")
.def("set_gradient_fp32_sync", &ParallelContext::set_gradient_fp32_sync, "Set cast before mirror.")
.def("get_loss_repeated_mean", &ParallelContext::loss_repeated_mean, "Get loss repeated mean.")

@@ -323,7 +323,7 @@ def _context():
return _k_context
- @args_type_check(device_num=int, global_rank=int, mirror_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
+ @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
auto_parallel_search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool)
def set_auto_parallel_context(**kwargs):
@@ -341,8 +341,8 @@ def set_auto_parallel_context(**kwargs):
Args:
device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
- mirror_mean (bool): Whether to perform mean operator after all-reduce of mirror.
- "stand_alone" do not support mirror_mean. Default: False.
+ gradients_mean (bool): Whether to perform mean operator after all-reduce of mirror.
+ "stand_alone" do not support gradients_mean. Default: False.
gradient_fp32_sync (bool): Gradients allreduce by fp32 even though gradients is fp16 if this flag is True.
"stand_alone", "data_parallel" and "hybrid_parallel" do not support
gradient_fp32_sync. Default: True.
@@ -380,7 +380,7 @@ def set_auto_parallel_context(**kwargs):
Examples:
>>> context.set_auto_parallel_context(device_num=8)
>>> context.set_auto_parallel_context(global_rank=0)
- >>> context.set_auto_parallel_context(mirror_mean=True)
+ >>> context.set_auto_parallel_context(gradients_mean=True)
>>> context.set_auto_parallel_context(gradient_fp32_sync=False)
>>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
>>> context.set_auto_parallel_context(parameter_broadcast=False)
@@ -412,7 +412,7 @@ def reset_auto_parallel_context():
- device_num: 1.
- global_rank: 0.
- - mirror_mean: False.
+ - gradients_mean: False.
- gradient_fp32_sync: True.
- parallel_mode: "stand_alone".
- parameter_broadcast: False.
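
A minimal sketch of the documented reset behaviour, assuming the public context API shown in this diff: after the reset the flag reads back as its default, False.

import mindspore.context as context

context.set_auto_parallel_context(gradients_mean=True)
context.reset_auto_parallel_context()
# gradients_mean falls back to its documented default of False.
assert not context.get_auto_parallel_context("gradients_mean")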

@@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
"""Cell_wrapper."""
- from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean,
+ from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean,
_get_parallel_mode)
from mindspore.context import ParallelMode
from ...common import dtype as mstype
@@ -190,7 +190,7 @@ class TrainOneStepCell(Cell):
if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
self.reducer_flag = True
if self.reducer_flag:
- mean = _get_mirror_mean()
+ mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
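
For orientation, a hedged sketch of how the branch above is reached from user code; the network, loss function, and optimizer are placeholders, and a real run needs a multi-device launch (the training scripts later in this diff use the same pattern).

import mindspore.nn as nn
import mindspore.context as context
from mindspore.communication.management import init
from mindspore.context import ParallelMode

# Placeholder model pieces; any Cell-based network and optimizer would do.
net = nn.Dense(16, 10)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
net_with_loss = nn.WithLossCell(net, loss)
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

# Set up communication, then data parallelism with gradient averaging.
init()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                  device_num=8, gradients_mean=True)

# TrainOneStepCell now takes the reducer_flag branch above: mean comes from
# _get_gradients_mean() (True here), degree from _get_device_num() (8 here),
# and both feed the DistributedGradReducer it constructs.
train_net = nn.TrainOneStepCell(net_with_loss, opt)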

@@ -279,7 +279,7 @@ class DistributedGradReducer(Cell):
>>> ParallelMode.HYBRID_PARALLEL]:
>>> self.reducer_flag = True
>>> if self.reducer_flag:
- >>> mean = context.get_auto_parallel_context("mirror_mean")
+ >>> mean = context.get_auto_parallel_context("gradients_mean")
>>> if mean.get_device_num_is_set():
>>> degree = context.get_auto_parallel_context("device_num")
>>> else:

@@ -16,7 +16,7 @@
import mindspore.context as context
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
- from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean
+ from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_gradients_mean
from ..cell import Cell
from ...common import Tensor, RowTensor
from ...common.parameter import Parameter
@@ -231,7 +231,7 @@ class TrainOneStepWithLossScaleCell(Cell):
self.grad_reducer = F.identity
self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]
if self.reducer_flag:
- mean = _get_mirror_mean()
+ mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE

@@ -95,23 +95,23 @@ class _AutoParallelContext:
self.check_context_handle()
return self._context_handle.get_global_rank()
- def set_mirror_mean(self, mirror_mean):
+ def set_gradients_mean(self, gradients_mean):
"""
- Set mirror_mean flag.
+ Set gradients_mean flag.
Note:
- If mirror_mean is true, it will insert a div operator after parameter gradients allreduce.
+ If gradients_mean is true, it will insert a div operator after parameter gradients allreduce.
Args:
- mirror_mean (bool): The mirror_mean flag.
+ gradients_mean (bool): The gradients_mean flag.
"""
self.check_context_handle()
- self._context_handle.set_mirror_mean(mirror_mean)
+ self._context_handle.set_gradients_mean(gradients_mean)
- def get_mirror_mean(self):
- """Get mirror_mean flag."""
+ def get_gradients_mean(self):
+ """Get gradients_mean flag."""
self.check_context_handle()
- return self._context_handle.get_mirror_mean()
+ return self._context_handle.get_gradients_mean()
def set_gradient_fp32_sync(self, gradient_fp32_sync):
"""
@@ -453,7 +453,7 @@ def auto_parallel_context():
_set_auto_parallel_context_func_map = {
"device_num": auto_parallel_context().set_device_num,
"global_rank": auto_parallel_context().set_global_rank,
- "mirror_mean": auto_parallel_context().set_mirror_mean,
+ "gradients_mean": auto_parallel_context().set_gradients_mean,
"gradient_fp32_sync": auto_parallel_context().set_gradient_fp32_sync,
"loss_repeated_mean": auto_parallel_context().set_loss_repeated_mean,
"parallel_mode": auto_parallel_context().set_parallel_mode,
@@ -468,7 +468,7 @@ _set_auto_parallel_context_func_map = {
_get_auto_parallel_context_func_map = {
"device_num": auto_parallel_context().get_device_num,
"global_rank": auto_parallel_context().get_global_rank,
- "mirror_mean": auto_parallel_context().get_mirror_mean,
+ "gradients_mean": auto_parallel_context().get_gradients_mean,
"gradient_fp32_sync": auto_parallel_context().get_gradient_fp32_sync,
"loss_repeated_mean": auto_parallel_context().get_loss_repeated_mean,
"parallel_mode": auto_parallel_context().get_parallel_mode,
@@ -480,7 +480,7 @@ _get_auto_parallel_context_func_map = {
"enable_parallel_optimizer": auto_parallel_context().get_enable_parallel_optimizer}
- @args_type_check(device_num=int, global_rank=int, mirror_mean=bool, gradient_fp32_sync=bool,
+ @args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool,
loss_repeated_mean=bool, parallel_mode=str, auto_parallel_search_mode=str,
parameter_broadcast=bool, strategy_ckpt_load_file=str,
strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool)
@@ -495,7 +495,7 @@ def _set_auto_parallel_context(**kwargs):
Args:
device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
- mirror_mean (bool): Whether to perform mean operator after all-reduce of mirror. Default: False.
+ gradients_mean (bool): Whether to perform mean operator after all-reduce of mirror. Default: False.
loss_repeated_mean (bool): Whether to perform mean operator in backward in the case of repeated
calculations. Default: True.
gradient_fp32_sync (bool): Gradients allreduce by fp32 even though gradients is fp16 if this flag is True.
@@ -562,7 +562,7 @@ def _reset_auto_parallel_context():
- device_num: 1.
- global_rank: 0.
- - mirror_mean: False.
+ - gradients_mean: False.
- gradient_fp32_sync: True.
- parallel_mode: "stand_alone".
- parameter_broadcast: False.

@@ -88,9 +88,9 @@ def _to_full_tensor(elem, device_num, global_rank, scaling_sens=None):
lst.append(Tensor(scaling_sens, mstype.float32))
return tuple(lst)
- def _get_mirror_mean():
- """Get if using mirror_mean."""
- return auto_parallel_context().get_mirror_mean()
+ def _get_gradients_mean():
+ """Get if using gradients_mean."""
+ return auto_parallel_context().get_gradients_mean()
def _get_device_num():

@@ -66,7 +66,7 @@ def model_fine_tune(flags, train_net, fix_weight_layer):
para.requires_grad = False
if __name__ == "__main__":
if args_opt.distribute == "true":
- context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True)
+ context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
init()
args_opt.base_size = config.crop_size
args_opt.crop_size = config.crop_size

@@ -54,7 +54,7 @@ if __name__ == '__main__':
rank = args_opt.rank_id
device_num = args_opt.device_num
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True, parameter_broadcast=True)
+ gradients_mean=True, parameter_broadcast=True)
init()
else:
rank = 0

@@ -78,7 +78,7 @@ if __name__ == '__main__':
if device_num > 1:
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
init()
elif device_target == "GPU":
init()
@@ -86,7 +86,7 @@ if __name__ == '__main__':
if device_num > 1:
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
else:
raise ValueError("Unsupported platform.")

@@ -58,7 +58,7 @@ if __name__ == '__main__':
cfg.group_size = get_group_size()
parallel_mode = ParallelMode.DATA_PARALLEL
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=cfg.group_size,
- parameter_broadcast=True, mirror_mean=True)
+ parameter_broadcast=True, gradients_mean=True)
else:
cfg.rank = 0
cfg.group_size = 1

@@ -58,7 +58,7 @@ if __name__ == '__main__':
rank = args_opt.rank_id
device_num = args_opt.device_num
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True, parameter_broadcast=True)
+ gradients_mean=True, parameter_broadcast=True)
init()
else:
rank = 0

@@ -39,7 +39,7 @@ def context_device_init(config):
init("nccl")
context.set_auto_parallel_context(device_num=get_group_size(),
parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
elif config.platform == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target=config.platform, device_id=config.device_id,
@@ -47,7 +47,7 @@ def context_device_init(config):
if config.run_distribute:
context.set_auto_parallel_context(device_num=config.rank_size,
parallel_mode=ParallelMode.DATA_PARALLEL,
- parameter_broadcast=True, mirror_mean=True)
+ parameter_broadcast=True, gradients_mean=True)
auto_parallel_context().set_all_reduce_fusion_split_indices([140])
init()
else:

@@ -57,7 +57,7 @@ elif args_opt.device_target == "GPU":
init()
context.set_auto_parallel_context(device_num=get_group_size(),
parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
context.set_context(mode=context.GRAPH_MODE,
device_target="GPU",
save_graphs=False)
@@ -77,7 +77,7 @@ def train_on_ascend():
context.set_auto_parallel_context(device_num=rank_size,
parallel_mode=ParallelMode.DATA_PARALLEL,
parameter_broadcast=True,
- mirror_mean=True)
+ gradients_mean=True)
init()
# define network

@@ -55,7 +55,7 @@ if args_opt.device_target == "GPU":
init()
context.set_auto_parallel_context(device_num=get_group_size(),
parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
else:
raise ValueError("Unsupported device_target.")

@@ -24,7 +24,7 @@ import mindspore.ops.composite as C
import mindspore.common.dtype as mstype
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.train.parallel_utils import ParallelMode
- from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean
+ from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_gradients_mean
GRADIENT_CLIP_TYPE = 1
@@ -921,7 +921,7 @@ class NASNetAMobileTrainOneStepWithClipGradient(nn.Cell):
if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
self.reducer_flag = True
if self.reducer_flag:
- mean = _get_mirror_mean()
+ mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)

@@ -58,7 +58,7 @@ if __name__ == '__main__':
cfg.group_size = get_group_size()
parallel_mode = ParallelMode.DATA_PARALLEL
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=cfg.group_size,
- parameter_broadcast=True, mirror_mean=True)
+ parameter_broadcast=True, gradients_mean=True)
else:
cfg.rank = 0
cfg.group_size = 1

@@ -76,7 +76,7 @@ if __name__ == '__main__':
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
context.set_auto_parallel_context(device_num=args_opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
if args_opt.net == "resnet50" or args_opt.net == "se-resnet50":
auto_parallel_context().set_all_reduce_fusion_split_indices([85, 160])
else:
@@ -86,7 +86,7 @@ if __name__ == '__main__':
else:
init()
context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
if args_opt.net == "resnet50":
auto_parallel_context().set_all_reduce_fusion_split_indices([85, 160])
ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"

@@ -76,11 +76,11 @@ if __name__ == '__main__':
context.set_auto_parallel_context(device_num=rank_size,
parallel_mode=ParallelMode.DATA_PARALLEL,
parameter_broadcast=True,
- mirror_mean=True)
+ gradients_mean=True)
init()
context.set_auto_parallel_context(device_num=args_opt.device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
- mirror_mean=True)
+ gradients_mean=True)
auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
# define network

@@ -129,7 +129,7 @@ class DistributedGradReducerThor(Cell):
>>> ParallelMode.HYBRID_PARALLEL]:
>>> self.reducer_flag = True
>>> if self.reducer_flag:
- >>> mean = context.get_auto_parallel_context("mirror_mean")
+ >>> mean = context.get_auto_parallel_context("gradients_mean")
>>> if mean.get_device_num_is_set():
>>> degree = context.get_auto_parallel_context("device_num")
>>> else:

@@ -22,7 +22,7 @@ import mindspore.common.dtype as mstype
from mindspore._checkparam import check_bool
from mindspore._checkparam import Validator as validator
from mindspore.nn.optim.optimizer import Optimizer
- from mindspore.parallel._utils import _get_device_num, _get_mirror_mean
+ from mindspore.parallel._utils import _get_device_num, _get_gradients_mean
from src.grad_reducer_thor import DistributedGradReducerThor
_momentum_opt = C.MultitypeFuncGraph("momentum_opt")
@@ -85,7 +85,7 @@ class THOR_GPU(Optimizer):
self.assign = P.Assign()
self.mul = P.Mul()
- mean = _get_mirror_mean()
+ mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer_thorA = DistributedGradReducerThor(self.parameters, 0, mean, degree)
self.grad_reducer_thorG = DistributedGradReducerThor(self.parameters, 0, mean, degree)
@@ -191,7 +191,7 @@ class THOR(Optimizer):
1.0 / 196, 1.0 / 196, 1.0 / 196,
1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49,
1.0]
- mean = _get_mirror_mean()
+ mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer_Amax = DistributedGradReducerThor(self.parameters, 2, mean, degree)
self.grad_reducer_Gmax = DistributedGradReducerThor(self.parameters, 5, mean, degree)

Some files were not shown because too many files have changed in this diff.
