@@ -470,10 +470,10 @@ def save(layer, path, input_spec=None, **configs):
     format model, which can be used for inference or fine-tuning after loading.
 
     It will save the translated program and all related persistable
-    variables of input Layer to given ``path``.
+    variables of input Layer to given ``path`` .
 
     ``path`` is the prefix of saved objects, and the saved translated program file
-    suffix is ``.pdmodel``, the saved persistable variables file suffix is ``.pdiparams``,
+    suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` ,
     and here also saved some additional variable description information to a file,
     its suffix is ``.pdiparams.info``, these additional information is used in fine-tuning.
 
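
A minimal sketch of the save flow this hunk documents, for orientation; the ``LinearNet`` layer and the ``example/net`` prefix are illustrative, not part of the patch:

```python
import paddle
from paddle.static import InputSpec

class LinearNet(paddle.nn.Layer):
    def __init__(self):
        super(LinearNet, self).__init__()
        self._linear = paddle.nn.Linear(784, 10)

    def forward(self, x):
        return self._linear(x)

net = LinearNet()
# With the prefix "example/net", save writes three files:
#   example/net.pdmodel         -> translated program
#   example/net.pdiparams       -> persistable variables
#   example/net.pdiparams.info  -> extra variable info used in fine-tuning
paddle.jit.save(net, "example/net",
                input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
```
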
@@ -483,18 +483,17 @@ def save(layer, path, input_spec=None, **configs):
 
       - Other C++ inference APIs
 
     Args:
-        layer (Layer): the Layer to be saved. The Layer should be decorated by `@paddle.jit.to_static`.
+        layer (Layer): The Layer to be saved.
         path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
-        input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model.
-            It is the example inputs that will be passed to saved TranslatedLayer's forward
-            method, which can be described by InputSpec or example Tensor. If None, all input variables of
-            the original Layer's forward method would be the inputs of the saved model. Default None.
-        **configs (dict, optional): Other save configuration options for compatibility. We do not
+        input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model's forward
+            function. If None, all input variables of the original Layer's forward function
+            would be the inputs of the saved model. Default None.
+        **configs (dict, optional): other save configuration options for compatibility. We do not
             recommend using these configurations, they may be removed in the future. If not necessary,
             DO NOT use them. Default None.
             The following options are currently supported:
             (1) output_spec (list[Tensor]): Selects the output targets of the saved model.
-            By default, all return variables of original Layer's forward function are kept as the
+            By default, all return variables of original Layer's forward method are kept as the
             output of the saved model. If the provided ``output_spec`` list is not all output variables,
             the saved model will be pruned according to the given ``output_spec`` list.
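
The ``output_spec`` pruning described in this hunk, as a hedged sketch: it assumes the layer's forward is decorated with ``@paddle.jit.to_static`` and has been run once so that output Tensors exist; ``TwoHeadNet`` and the ``two_head`` prefix are illustrative only:

```python
import paddle

class TwoHeadNet(paddle.nn.Layer):
    def __init__(self):
        super(TwoHeadNet, self).__init__()
        self._head_a = paddle.nn.Linear(8, 4)
        self._head_b = paddle.nn.Linear(8, 2)

    @paddle.jit.to_static
    def forward(self, x):
        # Two outputs, so output_spec has something to prune.
        return self._head_a(x), self._head_b(x)

net = TwoHeadNet()
out_a, out_b = net(paddle.randn([4, 8]))
# Keeping only out_a in output_spec prunes the saved program to that target.
paddle.jit.save(net, "two_head", output_spec=[out_a])
```
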
@@ -735,14 +734,14 @@ def load(path, **configs):
     4. The parameter's ``trainable`` information is lost and can not be recovered.
 
     Args:
-        path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix``.
-        **configs (dict, optional): other load configuration options for compatibility. We do not
+        path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix`` .
+        **configs (dict, optional): Other load configuration options for compatibility. We do not
             recommend using these configurations, they may be removed in the future. If not necessary,
             DO NOT use them. Default None.
             The following options are currently supported:
-            (1) model_filename (string): The inference model file name of the paddle 1.x
+            (1) model_filename (str): The inference model file name of the paddle 1.x
             ``save_inference_model`` save format. Default file name is :code:`__model__` .
-            (2) params_filename (string): The persistable variables file name of the paddle 1.x
+            (2) params_filename (str): The persistable variables file name of the paddle 1.x
             ``save_inference_model`` save format. No default file name, save variables separately
             by default.
 
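
A sketch of the compatibility path these two options exist for: loading a paddle 1.x ``save_inference_model`` result whose program and variables were written to named files. The ``fc.example.model`` directory and the ``__simplenet__`` / ``__params__`` file names are assumptions for illustration:

```python
import paddle

# Assumes a 1.x-format model saved earlier with explicit file names, e.g.:
#   fluid.io.save_inference_model(model_path, ["image"], [pred], exe,
#                                 model_filename="__simplenet__",
#                                 params_filename="__params__")
layer = paddle.jit.load("fc.example.model",
                        model_filename="__simplenet__",
                        params_filename="__params__")
layer.eval()
out = layer(paddle.randn([1, 784], dtype='float32'))
```
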
@@ -844,7 +843,6 @@ def load(path, **configs):
 
             import numpy as np
             import paddle
-            import paddle.fluid as fluid
             import paddle.static as static
             import paddle.nn as nn
             import paddle.optimizer as opt
@@ -870,9 +868,11 @@ def load(path, **configs):
                 def __len__(self):
                     return self.num_samples
 
+            paddle.enable_static()
+
             image = static.data(name='image', shape=[None, 784], dtype='float32')
             label = static.data(name='label', shape=[None, 1], dtype='int64')
-            pred = static.nn.fc(input=image, size=10, act='softmax')
+            pred = static.nn.fc(x=image, size=10, activation='softmax')
             loss = F.cross_entropy(input=pred, label=label)
             avg_loss = paddle.mean(loss)
 
@@ -901,7 +901,7 @@ def load(path, **configs):
                               fetch_list=[avg_loss])
 
             model_path = "fc.example.model"
-            fluid.io.save_inference_model(
+            paddle.fluid.io.save_inference_model(
                 model_path, ["image"], [pred], exe)
 
             # 2. load model