[Inference] EnableUseGpu has higher priority than flags (#29697)

* enable_use_gpu has higher priority than FLAGS

* update.
Wilber 4 years ago committed by GitHub
parent 9cbcc6cadc
commit b593d588aa
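For context, a minimal usage sketch (not part of this commit; the model path is hypothetical and a GPU build of Paddle Inference is assumed) of how the changed API is typically called. After this change, the pool size passed to EnableUseGpu also overwrites FLAGS_initial_gpu_memory_in_mb, so the API call takes priority over a value set through the gflag.

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("/path/to/model");  // hypothetical model directory
  // Request a 100 MB initial GPU memory pool on device 0. With this commit,
  // the value also overwrites FLAGS_initial_gpu_memory_in_mb, so it wins
  // over any value passed via the command-line flag.
  config.EnableUseGpu(100, 0);
  return 0;
}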

@@ -18,6 +18,10 @@
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/gpu_info.h"
+
+#ifdef PADDLE_WITH_CUDA
+DECLARE_uint64(initial_gpu_memory_in_mb);
+#endif

 namespace paddle {
 struct MkldnnQuantizerConfig;
@@ -68,6 +72,7 @@ void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
+  FLAGS_initial_gpu_memory_in_mb = memory_pool_init_size_mb_;
   device_id_ = device_id;
 #else
   LOG(ERROR) << "Please compile with gpu to EnableGpu()";
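The DECLARE_uint64 plus plain assignment pattern above relies on standard gflags behavior. A minimal standalone sketch of that mechanism (independent of Paddle; the flag is defined here only for illustration, whereas in Paddle it is defined elsewhere and only DECLAREd in analysis_config.cc):

#include <gflags/gflags.h>
#include <iostream>

DEFINE_uint64(initial_gpu_memory_in_mb, 0, "Initial GPU memory pool size in MB.");

// Mirrors what EnableUseGpu now does: assigning to the flag after parsing
// overwrites whatever the command line set, giving the API call priority.
void EnableUseGpuLike(uint64_t memory_pool_init_size_mb) {
  FLAGS_initial_gpu_memory_in_mb = memory_pool_init_size_mb;
}

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  EnableUseGpuLike(100);
  std::cout << FLAGS_initial_gpu_memory_in_mb << std::endl;  // prints 100
  return 0;
}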
@@ -482,12 +487,16 @@ float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
-  size_t gpu_used, gpu_available;
+  size_t gpu_total, gpu_available;
   platform::SetDeviceId(device_id_);
-  platform::GpuMemoryUsage(&gpu_used, &gpu_available);
-  double total_gpu_memory = (gpu_used + gpu_available) / 1024. / 1024.;
+  platform::GpuMemoryUsage(&gpu_available, &gpu_total);
+  double total_gpu_memory = gpu_total / 1024. / 1024.;
   float fraction_of_gpu_memory =
       static_cast<double>(memory_pool_init_size_mb()) / total_gpu_memory;
+  VLOG(3) << "total_gpu_memory is " << total_gpu_memory
+          << "M, gpu_available is " << gpu_available / 1024. / 1024.
+          << "M, memory_pool_init_size is " << memory_pool_init_size_mb()
+          << "M.";
   return fraction_of_gpu_memory;
 #else
   return 0.;
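The hunk above also changes the call so that the total device memory reported by platform::GpuMemoryUsage is used directly instead of being summed from two outputs. A minimal standalone sketch of the corrected fraction calculation (not Paddle code; the byte counts are made-up values standing in for what GpuMemoryUsage would report):

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  std::size_t gpu_total = 16ull * 1024 * 1024 * 1024;  // a 16 GB card, in bytes
  std::uint64_t memory_pool_init_size_mb = 100;        // value from EnableUseGpu
  // Same arithmetic as the patched fraction_of_gpu_memory_for_pool().
  double total_gpu_memory = gpu_total / 1024. / 1024.;  // MB
  float fraction_of_gpu_memory =
      static_cast<double>(memory_pool_init_size_mb) / total_gpu_memory;
  std::cout << "fraction of GPU memory for pool: " << fraction_of_gpu_memory
            << std::endl;  // ~0.0061 for these numbers
  return 0;
}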
