@@ -103,6 +103,7 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   CP_MEMBER(tensorrt_max_batchsize_);
   CP_MEMBER(tensorrt_min_subgraph_size_);
   CP_MEMBER(tensorrt_precision_mode_);
+  CP_MEMBER(trt_use_static_engine_);
   // MKLDNN related.
   CP_MEMBER(use_mkldnn_);
   CP_MEMBER(mkldnn_enabled_op_types_);
@@ -144,7 +145,7 @@ void AnalysisConfig::EnableMKLDNN() {
 
 void AnalysisConfig::EnableTensorRtEngine(
     int workspace_size, int max_batch_size, int min_subgraph_size,
-    AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode, bool use_static) {
 #ifdef PADDLE_WITH_CUDA
   if (!use_gpu()) {
     LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -156,6 +157,7 @@ void AnalysisConfig::EnableTensorRtEngine(
   tensorrt_max_batchsize_ = max_batch_size;
   tensorrt_min_subgraph_size_ = min_subgraph_size;
   tensorrt_precision_mode_ = precision_mode;
+  trt_use_static_engine_ = use_static;
 
   Update();
 #else
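A minimal usage sketch of the changed API, not part of the patch itself: it shows how a caller would pass the new `use_static` argument to `EnableTensorRtEngine`. The model directory, GPU memory pool size, and argument values below are illustrative placeholders, assuming the public `AnalysisConfig` interface from `paddle_inference_api.h`.

```cpp
// Sketch: enable the TensorRT subgraph engine with the new use_static flag.
#include "paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet");             // hypothetical model directory
  config.EnableUseGpu(100 /*MB*/, 0);         // TensorRT requires the GPU to be enabled first
  config.EnableTensorRtEngine(
      1 << 20 /*workspace_size*/, 1 /*max_batch_size*/, 3 /*min_subgraph_size*/,
      paddle::AnalysisConfig::Precision::kFloat32,
      true /*use_static: keep the built engine for reuse across runs*/);
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor != nullptr ? 0 : 1;
}
```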