@@ -2854,10 +2854,6 @@ class Program(object):
         self._use_hierarchical_allreduce = False
         self._hierarchical_allreduce_inter_nranks = 0
 
-        # @deprecated(the python memory optimize transpiler is deprecated)
-        # whether the program is optimized by memory_optimize_transpiler
-        self.__is_mem_optimized = False
-
         # if this program has been optimized by distributed optimizer
         # fleet_opt will be given a value
         self._fleet_opt = None
@@ -2869,16 +2865,6 @@ class Program(object):
         # appending gradients times
         self._appending_grad_times = 0
 
-    @property
-    def _is_mem_optimized(self):
-        # if the program is optimized, operator input/outputs
-        # maybe same, which conflict with save_inference_model.
-        return self.__is_mem_optimized
-
-    @_is_mem_optimized.setter
-    def _is_mem_optimized(self, target):
-        self.__is_mem_optimized = target
-
     @property
     def _op_role(self):
         """
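
For context, the accessors removed above are a standard Python property/setter pair over a name-mangled flag. Below is a minimal standalone sketch of that pattern, with the class body trimmed to the relevant pieces and names taken from the diff; it is illustration only, not the full Program class:

    class Program(object):
        def __init__(self):
            # Name-mangled to _Program__is_mem_optimized, so the flag is
            # normally reached only through the property below.
            self.__is_mem_optimized = False

        @property
        def _is_mem_optimized(self):
            # If the program was memory-optimized, operator inputs/outputs
            # may alias, which conflicts with save_inference_model.
            return self.__is_mem_optimized

        @_is_mem_optimized.setter
        def _is_mem_optimized(self, target):
            self.__is_mem_optimized = target


    prog = Program()
    prog._is_mem_optimized = True   # goes through the setter
    print(prog._is_mem_optimized)   # True

With the deprecated memory-optimize transpiler path gone, nothing sets this flag anymore, which is why the whole property/setter pair can be deleted along with the attribute.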