@@ -650,99 +650,6 @@ class ProgramTranslator(object):
        source_code = ast_to_source_code(root_wrapper.node)
        return source_code

    @deprecated(since='2.0', instead="paddle.imperative.jit.save")
    @switch_to_static_graph
    def save_inference_model(self, dirname, feed=None, fetch=None):
"""
|
|
|
|
|
Saves current model as the inference model. It will prune the main_program
|
|
|
|
|
to build a new program especially for inference, and then save it and all
|
|
|
|
|
related parameters to given `dirname` . The saved inference model can be
|
|
|
|
|
loaded by `:ref:`api_fluid_io_load_inference_model` or `C++ inference APIs.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
dirname (str): the directory to save the inference model.
|
|
|
|
|
feed (list[int], optional): the indices of the input variables of the
|
|
|
|
|
dygraph functions which will be saved as input variables in
|
|
|
|
|
inference model. If None, all input variables of the dygraph function
|
|
|
|
|
would be the inputs of the saved inference model. Default None.
|
|
|
|
|
fetch (list[int], optional): the indices of the returned variable of the
|
|
|
|
|
dygraph functions which will be saved as output variables in
|
|
|
|
|
inference model. If None, all output variables of the dygraph function
|
|
|
|
|
would be the outputs of the saved inference model. Default None.
|
|
|
|
|
Returns:
|
|
|
|
|
None
|
|
|
|
|
Examples:
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
import numpy as np
|
|
|
|
|
import paddle.fluid as fluid
|
|
|
|
|
from paddle.fluid.dygraph import Linear
|
|
|
|
|
from paddle.fluid.dygraph import declarative
|
|
|
|
|
from paddle.fluid.dygraph import ProgramTranslator
|
|
|
|
|
|
|
|
|
|
class SimpleNet(fluid.dygraph.Layer):
|
|
|
|
|
def __init__(self, in_size, out_size):
|
|
|
|
|
super(SimpleNet, self).__init__()
|
|
|
|
|
self._linear = Linear(in_size, out_size)
|
|
|
|
|
|
|
|
|
|
@declarative
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
y = self._linear(x)
|
|
|
|
|
z = self._linear(y)
|
|
|
|
|
loss = fluid.layers.mean(z)
|
|
|
|
|
return z, loss
|
|
|
|
|
|
|
|
|
|
with fluid.dygraph.guard(fluid.CPUPlace()):
|
|
|
|
|
net = SimpleNet(8, 8)
|
|
|
|
|
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
|
|
|
|
|
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
|
|
|
|
|
for i in range(10):
|
|
|
|
|
loss, out = net(x)
|
|
|
|
|
loss.backward()
|
|
|
|
|
adam.minimize(loss)
|
|
|
|
|
net.clear_gradients()
|
|
|
|
|
# Save inference model.
|
|
|
|
|
# Note that fetch=[0] means we set 'z' as the inference output.
|
|
|
|
|
prog_trans = ProgramTranslator()
|
|
|
|
|
prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0])
|
|
|
|
|
|
|
|
|
|
# In this example, the inference model will be pruned based on output (z).
|
|
|
|
|
# The pruned inference program is going to be saved in the folder
|
|
|
|
|
# "./dy2stat_infer_model" and parameters are going to be saved in separate
|
|
|
|
|
# files in the folder.
|
|
|
|
|
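
                # A minimal follow-up sketch (added illustration, not part of
                # the original example): once saved, the model can be reloaded
                # outside of dygraph mode with fluid.io.load_inference_model.
                exe = fluid.Executor(fluid.CPUPlace())
                [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
                    dirname="./dy2stat_infer_model", executor=exe)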
"""
|
|
|
|
|
|
|
|
|
|
        def get_feed_fetch(var_list, partial_vars, return_name=False):
            # Keep only real Variables; the input list may also contain
            # non-Variable objects (e.g. the layer instance itself).
            vars = [
                var for var in var_list if isinstance(var, framework.Variable)
            ]
            # If the user passed indices (feed/fetch), select that subset.
            if partial_vars:
                vars = [vars[idx] for idx in partial_vars]
            # The feed list is handed to save_inference_model by name.
            if return_name:
                vars = [var.name for var in vars]

            return vars
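
        # Illustration (hypothetical values): if concrete_program.outputs is
        # [z, loss], then:
        #   get_feed_fetch([z, loss], [0])        -> [z]
        #   get_feed_fetch([z, loss], None, True) -> [z.name, loss.name]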

        func_spec, (concrete_program,
                    partial_layer) = self._program_cache.last()
        # Share each ParamBase's tensor with a variable of the same name in a
        # fresh scope, so that the static-graph save path below can find all
        # parameter values by name without copying them.
        scope = core.Scope()
        for param_base in concrete_program.parameters:
            param_tensor = scope.var(param_base.name).get_tensor()
            src_tensor = param_base.value().get_tensor()
            param_tensor._share_data_with(src_tensor)

        feed_var_names = get_feed_fetch(concrete_program.inputs, feed, True)
        fetch_vars = get_feed_fetch(concrete_program.outputs, fetch)

        from paddle.fluid.io import save_inference_model
        with scope_guard(scope):
            save_inference_model(
                dirname=dirname,
                feeded_var_names=feed_var_names,
                target_vars=fetch_vars,
                executor=executor.Executor(framework._current_expected_place()),
                main_program=concrete_program.main_program.clone())

    def get_program_cache(self):
        """
        Returns the ProgramCache instance. This method is used by PaddlePaddle