@@ -173,12 +173,47 @@ TEST(Analyzer_MM_DNN, compare_determine) {
 }
 
 #ifdef PADDLE_WITH_MKLDNN
-void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity) {
+void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity,
+                          std::vector<std::vector<PaddleTensor>> *outputs) {
   AnalysisConfig config;
   SetConfig(&config);
   config.EnableMKLDNN();
+  // TODO(luotao): explicit following settings will be deprecated after enhance
+  // config.EnableMKLDNN() interface.
+  config.SetMkldnnCacheCapacity(mkldnn_input_shape_cache_capacity);
+
+  std::vector<PaddleTensor> input;
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
+
+  int sample_num = 10;
+  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+  outputs->resize(sample_num);
+
+  for (int i = 0; i < sample_num; i++) {
+    PrepareInputs(&input, &data, FLAGS_batch_size);
+    predictor->Run(input, &(*outputs)[i], 1);
+  }
+}
+
+TEST(Analyzer_MM_DNN, mkldnn_cache_clear) {
+  std::vector<std::vector<PaddleTensor>> outputs, cache_outputs;
+  // 0 means do not use cache clear strategy.
+  TestMkldnnCacheClear(0, &outputs);
+  // 4 means use cache clear strategy, and the
+  // mkldnn_input_shape_cache_capacity is 4.
+  TestMkldnnCacheClear(4, &cache_outputs);
+  // compare the result.
+  for (size_t i = 0; i < outputs.size(); i++) {
+    CompareResult(outputs[i], cache_outputs[i]);
+  }
+}
+
+void TestMkldnnShapeBlobSize(int mkldnn_input_shape_cache_capacity) {
+  AnalysisConfig config;
+  SetConfig(&config);
+  config.EnableMKLDNN();
   config.SwitchUseFeedFetchOps(false);
   // Since AnalysisPredictor::Run() will reset cur_mkldnn_session_id to default
   // before its finished, we use AnalysisPredictor::ZeroCopyRun() here to check
   // the mkldnn_shape_blob_size.
   if (mkldnn_input_shape_cache_capacity > 0) {
     platform::set_cur_mkldnn_session_id(
         platform::kMKLDNNSessionID_CacheClearing);
@@ -186,7 +221,7 @@ void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity) {
         mkldnn_input_shape_cache_capacity);
   }
 
-  std::vector<PaddleTensor> input, output;
+  std::vector<PaddleTensor> input;
   auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   int sample_num = 10;
@@ -195,8 +230,12 @@ void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity) {
   auto &pool = platform::DeviceContextPool::Instance();
   auto *dev_ctx = dynamic_cast<platform::MKLDNNDeviceContext *>(
       pool.Get(platform::CPUPlace()));
+  // clear before test
+  dev_ctx->ResetBlobMap();
+
   for (int i = 0; i < sample_num; i++) {
     PrepareInputs(&input, &data, FLAGS_batch_size);
+    ConvertPaddleTensorToZeroCopyTensor(predictor.get(), input);
     if (mkldnn_input_shape_cache_capacity > 0) {
       std::stringstream ss;
       for (size_t i = 0; i < input.size(); i++) {
@@ -204,11 +243,9 @@ void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity) {
           ss << input[i].shape[j] << "-";
         }
       }
-      // TODO(luotao): explicit following settings will be deprecated after
-      // enhance config.EnableMKLDNN() interface.
       platform::set_cur_input_shape_str(ss.str());
     }
-    predictor->Run(input, &output, 1);
+    predictor->ZeroCopyRun();
   }
   if (mkldnn_input_shape_cache_capacity > 0) {
     PADDLE_ENFORCE_EQ(dev_ctx->GetShapeBlobSize(),
@@ -216,15 +253,14 @@ void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity) {
   } else {
     PADDLE_ENFORCE_EQ(dev_ctx->GetShapeBlobSize(), 1UL);
   }
   dev_ctx->ResetBlobMap();
 }
 
-TEST(Analyzer_MM_DNN, mkldnn_cache_clear) {
+TEST(Analyzer_MM_DNN, mkldnn_shape_blob_size) {
   // 0 means do not use cache clear strategy.
-  TestMkldnnCacheClear(0);
+  TestMkldnnShapeBlobSize(0);
   // 4 means use cache clear strategy, and the
   // mkldnn_input_shape_cache_capacity is 4.
-  TestMkldnnCacheClear(4);
+  TestMkldnnShapeBlobSize(4);
 }
 #endif
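Usage note: the new TestMkldnnCacheClear above drives the cache-capacity knob entirely through the public AnalysisConfig API, while TestMkldnnShapeBlobSize still reaches into platform:: internals to check the blob count. Below is a minimal stand-alone sketch of the client-side pattern the test exercises; it assumes only the EnableMKLDNN, SetMkldnnCacheCapacity, CreatePaddlePredictor and Run calls that appear in the diff, and the model directory and prepared inputs are hypothetical placeholders.

// Sketch of the client-side usage exercised by TestMkldnnCacheClear above.
// The model directory and the prepared `inputs` vector are placeholders.
#include <string>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

std::vector<paddle::PaddleTensor> RunWithMkldnnCacheCapacity(
    const std::string &model_dir,                     // hypothetical path
    const std::vector<paddle::PaddleTensor> &inputs,  // prepared elsewhere
    int mkldnn_input_shape_cache_capacity) {
  paddle::AnalysisConfig config;
  config.SetModel(model_dir);
  config.EnableMKLDNN();
  // 0 keeps the default behaviour; a positive value (e.g. 4, as in the test)
  // bounds how many input-shape-specific MKL-DNN blobs are kept cached.
  config.SetMkldnnCacheCapacity(mkldnn_input_shape_cache_capacity);

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run(inputs, &outputs, /*batch_size=*/1);
  return outputs;
}

As in the test, a run with capacity 0 and a run with capacity 4 are expected to produce matching outputs; only the amount of cached MKL-DNN state differs, which is what the CompareResult loop in the diff verifies.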