@@ -45,6 +45,7 @@ DEFINE_bool(use_analysis, true,
             "Running the inference program in analysis mode.");
 DEFINE_bool(record_benchmark, false,
             "Record benchmark after profiling the model");
+DEFINE_double(accuracy, 1e-3, "Result Accuracy.");
 
 DECLARE_bool(profile);
 DECLARE_int32(paddle_num_threads);
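Since the tolerance is now a gflag rather than a literal, it can be adjusted per run. DEFINE_double(accuracy, 1e-3, ...) exposes an --accuracy command-line switch (standard gflags behaviour), so whichever test binary links this helper can be started with, for example, --accuracy=1e-5 to tighten the element-wise check below without recompiling; the 1e-3 default keeps existing tests behaving as before.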
@@ -85,7 +86,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
         float *pdata = static_cast<float *>(out.data.data());
         float *pdata_ref = static_cast<float *>(ref_out.data.data());
         for (size_t j = 0; j < size; ++j) {
-          EXPECT_NEAR(pdata_ref[j], pdata[j], 1e-3);
+          EXPECT_NEAR(pdata_ref[j], pdata[j], FLAGS_accuracy);
         }
         break;
       }
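For context, gtest's EXPECT_NEAR(a, b, tol) asserts that the absolute error |a - b| <= tol, so the change above only swaps the hard-coded bound for the runtime-configurable one. A minimal, self-contained illustration of that semantics (plain gtest, nothing Paddle-specific; the values are made up):

    #include <gtest/gtest.h>

    TEST(AccuracyFlagSketch, AbsoluteTolerance) {
      float ref = 0.123456f;
      float out = 0.123466f;           // differs from ref by roughly 1e-5
      EXPECT_NEAR(ref, out, 1e-3);     // passes: |ref - out| <= 1e-3 (the old default)
      // EXPECT_NEAR(ref, out, 1e-6);  // would fail: the gap exceeds a 1e-6 tolerance
    }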
@@ -283,6 +284,26 @@ void TestPrediction(const PaddlePredictor::Config *config,
   }
 }
 
+void CompareDeterministic(
+    const PaddlePredictor::Config *config,
+    const std::vector<std::vector<PaddleTensor>> &inputs) {
+  int batch_size = FLAGS_batch_size;
+  int num_times = FLAGS_repeat;
+  auto predictor = CreateTestPredictor(config, FLAGS_use_analysis);
+
+  // warmup run
+  std::vector<PaddleTensor> warmup_outputs, outputs;
+  predictor->Run(inputs[0], &warmup_outputs, batch_size);
+
+  // run num_times to Compare Deterministic Result.
+  for (int i = 0; i < num_times; i++) {
+    for (size_t j = 0; j < inputs.size(); j++) {
+      predictor->Run(inputs[j], &outputs, batch_size);
+      CompareResult(outputs, warmup_outputs);
+    }
+  }
+}
+
 void CompareNativeAndAnalysis(
     const PaddlePredictor::Config *config,
     const std::vector<std::vector<PaddleTensor>> &inputs) {
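For reference, the new CompareDeterministic helper is meant to be driven the same way as the surrounding Compare* helpers: build a config and a batch of inputs inside a test, then pass both in. A rough sketch of such a caller, assuming the AnalysisConfig type and test-local SetConfig/SetInput helpers used elsewhere in this test suite (all names here are illustrative, not part of this patch):

    TEST(Analyzer_sample, compare_determine) {
      AnalysisConfig cfg;
      SetConfig(&cfg);             // hypothetical per-model configuration helper
      std::vector<std::vector<PaddleTensor>> input_slots_all;
      SetInput(&input_slots_all);  // hypothetical input builder
      // Run the predictor repeatedly and check that every pass
      // reproduces the warmup output within FLAGS_accuracy.
      CompareDeterministic(
          reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
          input_slots_all);
    }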