fix save_inference_model and load_inference_model alias. test=develop (#28736)

musl/fix_failed_unittests_in_musl
Shibo Tao 4 years ago committed by GitHub
parent abbc507a81
commit f0806bdaf2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -23,8 +23,8 @@ __all__ = [
]
from . import nn
from .io import save_inference_model
from .io import load_inference_model
from .io import save_inference_model #DEFINE_ALIAS
from .io import load_inference_model #DEFINE_ALIAS
from ..fluid import Scope #DEFINE_ALIAS
from .input import data #DEFINE_ALIAS
from .input import InputSpec #DEFINE_ALIAS

@ -97,7 +97,7 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor):
# Feed data and train process
# Save inference model. Note we don't save label and loss in this example
paddle.static.io.save_inference_model(path_prefix, [image], [predict], exe)
paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
# In this example, the save_inference_model inference will prune the default
# main program according to the network's input node (img) and output node (predict).
@ -239,10 +239,10 @@ def load_inference_model(path_prefix, executor, **configs):
# Save the inference model
path_prefix = "./infer_model"
paddle.static.io.save_inference_model(path_prefix, [image], [hidden_b], exe)
paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.io.load_inference_model(path_prefix, exe))
paddle.static.load_inference_model(path_prefix, exe))
tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},

Loading…
Cancel
Save