/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <fstream>
#include <iostream>
#include "paddle/fluid/inference/tests/api/tester_helper.h"

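// NOTE: FLAGS_infer_model, FLAGS_batch_size, FLAGS_num_threads and
// FLAGS_test_all_data are gflags shared by the inference API tests (see
// tester_helper.h). An illustrative invocation of the built test binary is
// shown below; the actual target name and model path will vary per build:
//   ./test_analyzer_resnet50 --infer_model=/path/to/resnet50_model \
//       --batch_size=1 --num_threads=1
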
namespace paddle {
namespace inference {
namespace analysis {

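// Point the analysis config at the serialized program and parameters under
// FLAGS_infer_model and run on CPU with IR optimization enabled.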
void SetConfig(AnalysisConfig *cfg) {
  cfg->param_file = FLAGS_infer_model + "/params";
  cfg->prog_file = FLAGS_infer_model + "/model";
  cfg->use_gpu = false;
  cfg->device = 0;
  cfg->enable_ir_optim = true;
  cfg->specify_input_name = true;
}

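// Build one deterministic batch of NCHW float32 input with shape
// [FLAGS_batch_size, 3, 318, 318] and append it to *inputs.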
void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
  PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");

  PaddleTensor input;
  // channel=3, height/width=318
  std::vector<int> shape({FLAGS_batch_size, 3, 318, 318});
  input.shape = shape;
  input.dtype = PaddleDType::FLOAT32;

  // Fill the input with deterministic values rather than random data so that
  // profiling runs are easy to reproduce.
  size_t size = FLAGS_batch_size * 3 * 318 * 318;
  input.data.Resize(size * sizeof(float));
  float *input_data = static_cast<float *>(input.data.data());
  for (size_t i = 0; i < size; i++) {
    *(input_data + i) = static_cast<float>(i) / size;
  }

  std::vector<PaddleTensor> input_slots;
  input_slots.assign({input});
  (*inputs).emplace_back(input_slots);
}

// Kept minimal so the prediction can be profiled on its own.
TEST(Analyzer_resnet50, profile) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<PaddleTensor> outputs;

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads);

  if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
    PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
    size_t size = GetSize(outputs[0]);
    // The output is a 512-dimensional feature vector per sample.
    EXPECT_EQ(size, 512 * FLAGS_batch_size);
  }
}

// Check the fuse status.
TEST(Analyzer_resnet50, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  int num_ops;
  GetFuseStatis(cfg, &num_ops);
}

// Compare the results of NativeConfig and AnalysisConfig.
TEST(Analyzer_resnet50, compare) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareNativeAndAnalysis(cfg, input_slots_all);
}

}  // namespace analysis
}  // namespace inference
}  // namespace paddle