diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc
index 403ebfe72a..99611ce84b 100644
--- a/paddle/fluid/inference/analysis/ir_pass_manager.cc
+++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -67,10 +67,8 @@ void IRPassManager::CreatePasses(Argument *argument,
     pass->Set("max_batch_size", new int(argument->tensorrt_max_batch_size()));
     pass->Set("min_subgraph_size",
               new int(argument->tensorrt_min_subgraph_size()));
-    pass->Set(
-        "program",
-        new framework::ProgramDesc *(
-            const_cast<framework::ProgramDesc *>(&argument->main_program())));
+    pass->Set("program",
+              new framework::ProgramDesc *(&argument->main_program()));
 
     bool enable_int8 = argument->tensorrt_precision_mode() ==
                        contrib::AnalysisConfig::Precision::kInt8;
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index e83247d39e..2ff35c7c6a 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -114,9 +114,9 @@ class TensorRTEngineOp : public framework::OperatorBase {
     framework::Executor executor(dev_place);
     auto *block = Attr<framework::BlockDesc *>("sub_block");
     auto *program = block->Program();
-    auto *scope_ptr = const_cast<framework::Scope *>(&scope);
+    auto &current_scope = scope.NewScope();
     auto ctx = executor.Prepare(*program, block->ID());
-    executor.RunPreparedContext(ctx.get(), scope_ptr, false, true, true);
+    executor.RunPreparedContext(ctx.get(), &current_scope, false, true, true);
   }
 
   void RunImpl(const framework::Scope &scope,
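
Note on why the `const_cast` in tensorrt_engine_op.h becomes unnecessary: `framework::Scope::NewScope()` is declared const yet hands back a mutable child scope, so the op can run the prepared program in a fresh child scope instead of casting away the constness of the caller's `scope`. The following is a minimal standalone C++ sketch of that pattern, not Paddle's actual `Scope` class; the member names are illustrative only.

// scope_sketch.cc - illustrates the "const factory, mutable child" pattern
// that lets callers holding a `const Scope &` avoid const_cast.
#include <memory>
#include <vector>

class Scope {
 public:
  // const-qualified, but still yields a mutable child scope: the child list
  // is `mutable`, mirroring the pattern used by framework::Scope.
  Scope &NewScope() const {
    kids_.emplace_back(new Scope);
    return *kids_.back();
  }

 private:
  mutable std::vector<std::unique_ptr<Scope>> kids_;
};

void Run(const Scope &scope) {
  // Before: auto *scope_ptr = const_cast<Scope *>(&scope);  // UB-prone
  // After: mutate a fresh child rather than the caller's const scope.
  Scope &current_scope = scope.NewScope();
  (void)current_scope;  // in the real op, &current_scope goes to the executor
}

int main() {
  Scope root;
  Run(root);
  return 0;
}

A side effect worth noting: the executor now writes intermediate variables into the child scope rather than the shared parent scope, which also keeps concurrent runs from clobbering each other's temporaries.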