@@ -312,11 +312,19 @@ static constexpr char kDefaultAllocatorStrategy[] = "naive_best_fit";
 #else
 static constexpr char kDefaultAllocatorStrategy[] = "auto_growth";
 #endif
-DEFINE_string(allocator_strategy, kDefaultAllocatorStrategy,
-              "The allocation strategy. naive_best_fit means the original best "
-              "fit allocator of Fluid. "
-              "auto_growth means the experimental auto-growth allocator. "
-              "Enum in [naive_best_fit, auto_growth].");
+DEFINE_string(
+    allocator_strategy, kDefaultAllocatorStrategy,
+    "The allocation strategy, enum in [naive_best_fit, auto_growth]. "
+    "naive_best_fit means the original pre-allocated allocator of Paddle. "
+    "auto_growth means the auto-growth allocator. "
+    "These two strategies differ in GPU memory allocation. "
+    "naive_best_fit strategy would occupy almost all GPU memory by default, "
+    "which prevents users from starting several Paddle jobs on the same GPU "
+    "card but leads to less memory fragmentation (i.e., maximum batch "
+    "size of models may be larger). auto_growth strategy would allocate "
+    "GPU memory on demand, which allows users to start several Paddle jobs "
+    "on the same GPU card but may lead to more memory fragmentation "
+    "(i.e., maximum batch size of models may be smaller).");
 
 /**
  * Memory related FLAG
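
For context, a minimal sketch (not part of this diff) of how consumer code could read this flag through the standard gflags pattern; the `UseAutoGrowthAllocator` helper name is hypothetical:

```cpp
#include <gflags/gflags.h>

#include <string>

// Pairs with the DEFINE_string(allocator_strategy, ...) above;
// gflags exposes the value as FLAGS_allocator_strategy.
DECLARE_string(allocator_strategy);

// Hypothetical helper: true when the on-demand auto_growth strategy is
// selected, false for the pre-allocating naive_best_fit strategy.
bool UseAutoGrowthAllocator() {
  return FLAGS_allocator_strategy == "auto_growth";
}
```

In practice, Paddle users usually switch strategies without touching code, via the matching `FLAGS_allocator_strategy` environment variable (e.g. `FLAGS_allocator_strategy=naive_best_fit`).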