add arguments for trt config

revert-13637-optimize-opyreader
nhzlx 7 years ago
parent 202e0a1ee1
commit 4801beb101

@@ -37,6 +37,14 @@ TEST(Analyzer, analysis_without_tensorrt) {
 TEST(Analyzer, analysis_with_tensorrt) {
   FLAGS_IA_enable_tensorrt_subgraph_engine = true;
   Argument argument;
+  int* minimum_subgraph_size = new int(0);
+  int* max_batch_size = new int(3);
+  int* workspace_size = new int(1 << 20);
+  std::string* precision_mode = new std::string("FP32");
+  argument.Set<int>("minimum_subgraph_size", minimum_subgraph_size);
+  argument.Set<int>("max_batch_size", max_batch_size);
+  argument.Set<int>("workspace_size", workspace_size);
+  argument.Set<std::string>("precision_mode", precision_mode);
   argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir));
   Analyzer analyser;
   analyser.Run(&argument);
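The keys registered above are read back inside the analysis passes through the matching Argument::Get<T> calls (the SubGraphFuse hunk below shows the minimum_subgraph_size read). A minimal consumer-side sketch, assuming only the Argument::Set/Get API visible in this diff; the function name is hypothetical:

    // Hypothetical reader of the TensorRT options registered on the Argument above.
    // Only Argument::Get<T> as used in this diff is assumed; the rest is a sketch.
    void ReadTrtOptions(Argument& argument) {
      int minimum_subgraph_size = argument.Get<int>("minimum_subgraph_size");
      int max_batch_size = argument.Get<int>("max_batch_size");
      int workspace_size = argument.Get<int>("workspace_size");
      std::string precision_mode = argument.Get<std::string>("precision_mode");
      // A pass would skip fusing subgraphs no larger than minimum_subgraph_size,
      // build the TRT engine for max_batch_size, give it workspace_size bytes,
      // and pick the numeric precision ("FP32" here) from precision_mode.
    }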

@@ -309,7 +309,7 @@ void SubGraphFuse::operator()() { ReplaceNodesWithSubGraphs(); }
 void SubGraphFuse::ReplaceNodesWithSubGraphs() {
   auto subgraphs = SubGraphSplitter(graph_, node_inside_subgraph_teller_)();
   for (auto &subgraph : subgraphs) {
-    if (subgraph.size() <= argument_->Get<int>("minimun_subgraph_size"))
+    if (subgraph.size() <= argument_->Get<int>("minimum_subgraph_size"))
       continue;
     std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end());
     // replace this sub-graph with the first node. Two steps: 1. Create a Block
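The corrected key feeds the size check above: a candidate subgraph is handed to TensorRT only when it has more nodes than the configured minimum; otherwise the loop skips it and the nodes stay with the original executor. A small illustration of that predicate (a sketch of the comparison above, not code from the pass):

    // Fuse only when the subgraph is strictly larger than the threshold, mirroring
    // the `size() <= Get<int>("minimum_subgraph_size") -> continue` check above.
    bool ShouldFuse(size_t subgraph_size, int minimum_subgraph_size) {
      return subgraph_size > static_cast<size_t>(minimum_subgraph_size);
    }
    // ShouldFuse(2, 0) == true; ShouldFuse(3, 3) == false (subgraph is skipped).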

@@ -68,7 +68,7 @@ TEST(SubGraphSplitter, Fuse) {
   auto dfg = ProgramDescToDFG(desc);
   Argument argument;
   int* minmum_subgraph_size = new int(3);
-  argument.Set<int>("minmum_subgraph_size", minmum_subgraph_size);
+  argument.Set<int>("minimum_subgraph_size", minmum_subgraph_size);
   size_t count0 = dfg.nodes.size();

@@ -36,6 +36,14 @@ TEST(TensorRTSubGraphPass, main) {
   };
   Argument argument(FLAGS_inference_model_dir);
+  int* minimum_subgraph_size = new int(0);
+  int* max_batch_size = new int(3);
+  int* workspace_size = new int(1 << 20);
+  std::string* precision_mode = new std::string("FP32");
argument.Set<int>("minimun_subgraph_size", minimum_subgraph_size);
argument.Set<int>("max_batch_size", max_batch_size);
argument.Set<int>("workspace_size", workspace_size);
argument.Set<std::string>("precision_mode", precision_mode);
DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"}; DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"};
DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"}; DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"};

@@ -94,7 +94,7 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor {
     int* max_batch_size = new int(config_.max_batch_size);
     int* workspace_size = new int(config_.workspace_size);
     std::string* precision_mode = new std::string(config_.precision_mode);
-    argument.Set<int>("minimun_subgraph_size", minimum_subgraph_size);
+    argument.Set<int>("minimum_subgraph_size", minimum_subgraph_size);
     argument.Set<int>("max_batch_size", max_batch_size);
    argument.Set<int>("workspace_size", workspace_size);
     argument.Set<std::string>("precision_mode", precision_mode);
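In the predictor these four values come from its config_ object rather than from test constants. A hedged sketch of what that options holder might look like; the struct name is invented, the member names mirror the config_ fields used above, and the defaults shown are just the test values from the earlier hunks:

    #include <string>

    // Hypothetical stand-in for the type of config_; not the real class name.
    struct TrtSubgraphOptions {
      int minimum_subgraph_size{0};        // smallest subgraph worth offloading to TRT
      int max_batch_size{3};               // largest batch the engine is built for
      int workspace_size{1 << 20};         // TensorRT scratch space in bytes
      std::string precision_mode{"FP32"};  // numeric precision for the engine
    };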
