@@ -54,7 +54,8 @@ std::string num2str(T a) {
 }
 } // namespace
 
-bool NativePaddlePredictor::Init() {
+bool NativePaddlePredictor::Init(
+    std::shared_ptr<framework::Scope> parent_scope) {
   VLOG(3) << "Predictor::init()";
 
   if (config_.use_gpu) {
@@ -62,9 +63,15 @@ bool NativePaddlePredictor::Init() {
   } else {
     place_ = paddle::platform::CPUPlace();
   }
-  paddle::framework::InitDevices(false);
+  if (parent_scope) {
+    scope_ = parent_scope;
+    sub_scope_ = &(parent_scope->NewScope());
+  } else {
+    paddle::framework::InitDevices(false);
+    scope_.reset(new paddle::framework::Scope());
+  }
+
   executor_.reset(new paddle::framework::Executor(place_));
-  scope_.reset(new paddle::framework::Scope());
 
   // Initialize the inference program
   if (!config_.model_dir.empty()) {
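
Note: the new branch changes scope ownership. With a parent scope, the predictor shares the root scope and puts its own variables into a child sub-scope; without one, it owns a fresh root. Below is a minimal, self-contained sketch of that pattern; the `Scope` class here is a hypothetical stand-in reduced to the two calls the diff uses, not the real `framework::Scope` API.

#include <memory>
#include <vector>

class Scope {
 public:
  // Create a child scope owned by this scope and return a reference to it.
  Scope& NewScope() {
    children_.emplace_back(new Scope);
    return *children_.back();
  }
  // Release one child scope previously created by NewScope().
  void DeleteScope(Scope* child) {
    for (auto it = children_.begin(); it != children_.end(); ++it) {
      if (it->get() == child) {
        children_.erase(it);
        return;
      }
    }
  }
 private:
  std::vector<std::unique_ptr<Scope>> children_;
};

// Mirrors the Init() branch above: with a parent, share the root scope and
// work in a private sub-scope; without one, own the root scope outright.
void InitScopes(std::shared_ptr<Scope> parent_scope,
                std::shared_ptr<Scope>* scope, Scope** sub_scope) {
  if (parent_scope) {
    *scope = parent_scope;
    *sub_scope = &parent_scope->NewScope();
  } else {
    scope->reset(new Scope());
  }
}
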
@@ -83,13 +90,8 @@ bool NativePaddlePredictor::Init() {
     return false;
   }
   ctx_ = executor_->Prepare(*inference_program_, 0);
-
-  // Create temporary variables first, so that the first batch do not need to
-  // create variables in the runtime. This is the logics of the old inference
-  // API.
-  // TODO(Superjomn) this should be modified when `Clone` is valid for
-  // multi-thread application.
-  executor_->CreateVariables(*inference_program_, scope_.get(), 0);
+  executor_->CreateVariables(
+      *inference_program_, sub_scope_ ? sub_scope_ : scope_.get(), 0);
 
   // Get the feed_target_names and fetch_target_names
   feed_target_names_ = inference_program_->GetFeedTargetNames();
@@ -97,6 +99,13 @@ bool NativePaddlePredictor::Init() {
   return true;
 }
 
+NativePaddlePredictor::~NativePaddlePredictor() {
+  if (sub_scope_) {
+    PADDLE_ENFORCE_NOT_NULL(scope_, "Should have parent scope!");
+    scope_->DeleteScope(sub_scope_);
+  }
+}
+
 bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                 std::vector<PaddleTensor> *output_data) {
   VLOG(3) << "Predictor::predict";
@@ -121,11 +130,12 @@ bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
   }
   // Run the inference program
   // if share variables, we need not create variables
-  executor_->RunPreparedContext(ctx_.get(),
-                                scope_.get(),
-                                &feed_targets,
-                                &fetch_targets,
-                                false /* don't create variable eatch time */);
+  executor_->RunPreparedContext(
+      ctx_.get(),
+      sub_scope_ != nullptr ? sub_scope_ : scope_.get(),
+      &feed_targets,
+      &fetch_targets,
+      false /* don't create variables each time */);
   if (!GetFetch(fetchs, output_data)) {
     LOG(ERROR) << "fail to get fetchs";
     return false;
|
|
|
|
|
VLOG(3) << "Predictor::clone";
|
|
|
|
|
std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
|
|
|
|
|
|
|
|
|
|
if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init()) {
|
|
|
|
|
if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
|
|
|
|
|
LOG(ERROR) << "fail to call Init";
|
|
|
|
|
return nullptr;
|
|
|
|
|
}
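
Note: Clone() now forwards `scope_`, so a clone reuses the parameters already loaded into the parent's scope and only gets a fresh sub-scope for its own intermediate variables. A hedged usage sketch follows; the helper name and threading setup are illustrative, not part of this patch, and clones are created on the main thread because the TODO removed earlier says Clone() is not yet valid for multi-threaded use.

#include <memory>
#include <thread>
#include <vector>
#include "paddle/contrib/inference/paddle_inference_api.h"

// Illustrative only: one cloned predictor per worker thread. All clones
// share the parent's scope (the weights); each runs in its own sub-scope.
void RunWorkers(PaddlePredictor* main_predictor, int num_threads) {
  std::vector<std::unique_ptr<PaddlePredictor>> clones;
  for (int i = 0; i < num_threads; ++i) {
    clones.emplace_back(main_predictor->Clone());  // clone on the main thread
  }
  std::vector<std::thread> workers;
  for (auto& clone : clones) {
    workers.emplace_back([&clone] {
      std::vector<PaddleTensor> inputs, outputs;
      // ... fill `inputs` with real feed data before calling Run() ...
      clone->Run(inputs, &outputs);
    });
  }
  for (auto& t : workers) t.join();
}
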
@@ -266,7 +276,7 @@ CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
   }
 
   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
-  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init()) {
+  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
  return std::move(predictor);
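
Note: the two call sites pin down the new contract: CreatePaddlePredictor builds a root predictor with Init(nullptr), so it owns its scope, while Clone() derives shared-scope predictors from it via Init(scope_). A hedged end-to-end sketch, assuming only the NativeConfig fields used in this file (use_gpu, model_dir); the model path is a placeholder.

#include <memory>
#include "paddle/contrib/inference/paddle_inference_api.h"

int main() {
  NativeConfig config;
  config.use_gpu = false;        // takes the CPUPlace branch in Init() above
  config.model_dir = "./model";  // placeholder model directory
  // Root predictor: Init(nullptr) makes it own a fresh scope.
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
  if (!predictor) return 1;
  // Derived predictor: Init(scope_) inside Clone() shares the root scope.
  auto clone = predictor->Clone();
  return clone ? 0 : 1;
}
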