|
|
|
@ -37,12 +37,20 @@ TEST(Analyzer, analysis_without_tensorrt) {
|
|
|
|
|
// Exercises the Analyzer end-to-end with the TensorRT subgraph engine
// switched on: populates an Argument with the TensorRT tuning knobs
// (subgraph size, batch size, workspace, precision), points it at the
// test model directory, and runs the analysis pass.
TEST(Analyzer, analysis_with_tensorrt) {
  // Enable the TensorRT subgraph engine for this analysis run.
  FLAGS_IA_enable_tensorrt_subgraph_engine = true;

  Argument argument;
  // NOTE(review): Argument::Set appears to take ownership of these raw
  // heap pointers — confirm against the Argument API before refactoring.
  argument.Set<int>("minimum_subgraph_size", new int(0));
  argument.Set<int>("max_batch_size", new int(3));
  argument.Set<int>("workspace_size", new int(1 << 20));
  argument.Set<std::string>("precision_mode", new std::string("FP32"));

  // Model location comes from the test flag.
  argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir));

  Analyzer analyzer;
  analyzer.Run(&argument);
}
|
|
|
|
|
|
|
|
|
|
void TestWord2vecPrediction(const std::string &model_path) {
|
|
|
|
|
void TestWord2vecPrediction(const std::string& model_path) {
|
|
|
|
|
NativeConfig config;
|
|
|
|
|
config.model_dir = model_path;
|
|
|
|
|
config.use_gpu = false;
|
|
|
|
@ -73,8 +81,8 @@ void TestWord2vecPrediction(const std::string &model_path) {
|
|
|
|
|
// The outputs' buffers are in CPU memory.
|
|
|
|
|
for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
|
|
|
|
|
LOG(INFO) << "data: "
|
|
|
|
|
<< static_cast<float *>(outputs.front().data.data())[i];
|
|
|
|
|
PADDLE_ENFORCE(static_cast<float *>(outputs.front().data.data())[i],
|
|
|
|
|
<< static_cast<float*>(outputs.front().data.data())[i];
|
|
|
|
|
PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
|
|
|
|
|
result[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|