From d1a6e112e60e19fcfdc0ff4ea69a5fdb5c63a1d8 Mon Sep 17 00:00:00 2001
From: Zhaolong Xing
Date: Wed, 27 Nov 2019 21:08:26 +0800
Subject: [PATCH] fix C++ multicard inference bug. (#20955)

test=develop
---
 paddle/fluid/inference/api/analysis_predictor.cc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 7dcaf12a7c..cccaa09540 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -123,7 +123,7 @@ bool AnalysisPredictor::PrepareScope(
     status_is_cloned_ = true;
   } else {
     if (config_.use_gpu_) {
-      paddle::framework::InitDevices(false, {config_.device_id_});
+      paddle::framework::InitDevices(false);
     } else {
       paddle::framework::InitDevices(false, {});
     }
@@ -501,8 +501,6 @@ std::unique_ptr CreatePaddlePredictor<
     std::string flag = "--fraction_of_gpu_memory_to_use=" +
                        std::to_string(fraction_of_gpu_memory);
     flags.push_back(flag);
-    flags.push_back("--selected_gpus=" +
-                    std::to_string(config.gpu_device_id()));
     VLOG(3) << "set flag: " << flag;
     framework::InitGflags(flags);
   }