Restore quantization and distillation strategy before loading persistables. (#16958)

test=develop
shanyi15-patch-1
whs 6 years ago committed by GitHub
parent 8bcba3db84
commit 486f7d8ed6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -363,6 +363,9 @@ class Compressor(object):
strategies = pickle.load(
strategy_file, encoding='bytes')
for strategy in strategies:
strategy.restore_from_checkpoint(context)
if os.path.exists(model_path):
exe = SlimGraphExecutor(context.place)
with scope_guard(context.scope):

@ -46,3 +46,6 @@ class Strategy(object):
def on_compression_end(self, context):
pass
def restore_from_checkpoint(self, context):
pass

@ -38,7 +38,7 @@ class DistillationStrategy(Strategy):
super(DistillationStrategy, self).__init__(start_epoch, end_epoch)
self.distillers = distillers
def on_compression_begin(self, context):
def restore_from_checkpoint(self, context):
# load from checkpoint
if context.epoch_id > 0:
if context.epoch_id > self.start_epoch and context.epoch_id < self.end_epoch:

@ -88,7 +88,7 @@ class QuantizationStrategy(Strategy):
self.save_out_nodes = save_out_nodes
self.save_in_nodes = save_in_nodes
def on_compression_begin(self, context):
def restore_from_checkpoint(self, context):
"""
Restore graph when the compression task is inited from checkpoint.
"""
@ -143,10 +143,9 @@ class QuantizationStrategy(Strategy):
train_ir_graph.graph).with_data_parallel(
loss_name=context.optimize_graph.out_nodes['loss'],
build_strategy=build_strategy)
# for evaluation. And program compiled from ir graph must be with data parallel.
context.eval_graph.compiled_graph = CompiledProgram(
test_ir_graph.graph).with_data_parallel(
build_strategy=build_strategy)
context.eval_graph.program = test_ir_graph.to_program()
# for saving inference model after training
context.put('quantization_test_ir_graph_backup', test_ir_graph)

Loading…
Cancel
Save