@@ -27,23 +27,22 @@
 #include "include/api/model.h"
 #include "include/api/serialization.h"
 #include "include/api/context.h"
-#include "minddata/dataset/include/minddata_eager.h"
+#include "include/minddata/dataset/include/execute.h"
+#include "include/minddata/dataset/include/vision_ascend.h"
 #include "../inc/utils.h"
 #include "include/api/types.h"
-#include "minddata/dataset/include/vision.h"
 
-using mindspore::api::Context;
-using mindspore::api::Serialization;
-using mindspore::api::Model;
-using mindspore::api::kModelOptionInsertOpCfgPath;
-using mindspore::api::kModelOptionPrecisionMode;
-using mindspore::api::kModelOptionOpSelectImplMode;
-using mindspore::api::Status;
-using mindspore::api::MindDataEager;
-using mindspore::api::Buffer;
-using mindspore::api::ModelType;
-using mindspore::api::GraphCell;
-using mindspore::api::SUCCESS;
+using mindspore::Context;
+using mindspore::GlobalContext;
+using mindspore::ModelContext;
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Status;
+using mindspore::dataset::Execute;
+using mindspore::MSTensor;
+using mindspore::ModelType;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
 using mindspore::dataset::vision::DvppDecodeResizeJpeg;
 
 DEFINE_string(mindir_path, "", "mindir path");
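
This hunk moves the file off the experimental mindspore::api namespace onto the unified mindspore API: Buffer becomes MSTensor, SUCCESS becomes kSuccess, and MindDataEager is replaced by mindspore::dataset::Execute, with DvppDecodeResizeJpeg now provided by vision_ascend.h. The string-keyed build-option constants (kModelOption*) disappear because options are now set through the typed GlobalContext/ModelContext helpers used in the next hunk. A before/after sketch of the option plumbing, using only names that appear in this patch:

    // Old API: options collected in an untyped string map, passed to Model::Build().
    std::map<std::string, std::string> build_options;
    build_options.emplace(kModelOptionPrecisionMode, FLAGS_precision_mode);

    // New API: typed setters on a shared Context, handed to the Model constructor.
    auto model_context = std::make_shared<Context>();
    ModelContext::SetPrecisionMode(model_context, FLAGS_precision_mode);
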
@@ -51,94 +50,103 @@ DEFINE_string(dataset_path, ".", "dataset path");
 DEFINE_int32(device_id, 0, "device id");
 DEFINE_string(precision_mode, "allow_fp32_to_fp16", "precision mode");
 DEFINE_string(op_select_impl_mode, "", "op select impl mode");
-DEFINE_string(input_shape, "img_data:1, 3, 768, 1280; img_info:1, 4", "input shape");
+DEFINE_string(input_format, "nchw", "input format");
 DEFINE_string(aipp_path, "./aipp.cfg", "aipp path");
 
 int main(int argc, char **argv) {
   gflags::ParseCommandLineFlags(&argc, &argv, true);
   if (RealPath(FLAGS_mindir_path).empty()) {
     std::cout << "Invalid mindir" << std::endl;
     return 1;
   }
   if (RealPath(FLAGS_aipp_path).empty()) {
     std::cout << "Invalid aipp path" << std::endl;
     return 1;
   }
 
-  Context::Instance().SetDeviceTarget("Ascend310").SetDeviceID(FLAGS_device_id);
-  auto graph = Serialization::LoadModel(FLAGS_mindir_path, ModelType::kMindIR);
-  Model model((GraphCell(graph)));
-
-  std::map<std::string, std::string> build_options;
+  GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
+  GlobalContext::SetGlobalDeviceID(FLAGS_device_id);
+  auto graph = Serialization::LoadModel(FLAGS_mindir_path, ModelType::kMindIR);
+  auto model_context = std::make_shared<Context>();
   if (!FLAGS_precision_mode.empty()) {
-    build_options.emplace(kModelOptionPrecisionMode, FLAGS_precision_mode);
+    ModelContext::SetPrecisionMode(model_context, FLAGS_precision_mode);
   }
   if (!FLAGS_op_select_impl_mode.empty()) {
-    build_options.emplace(kModelOptionOpSelectImplMode, FLAGS_op_select_impl_mode);
+    ModelContext::SetOpSelectImplMode(model_context, FLAGS_op_select_impl_mode);
   }
   if (!FLAGS_aipp_path.empty()) {
-    build_options.emplace(kModelOptionInsertOpCfgPath, FLAGS_aipp_path);
+    ModelContext::SetInsertOpConfigPath(model_context, FLAGS_aipp_path);
   }
 
-  Status ret = model.Build(build_options);
-  if (ret != SUCCESS) {
+  Model model(GraphCell(graph), model_context);
+  Status ret = model.Build();
+  if (ret != kSuccess) {
     std::cout << "EEEEEEEERROR Build failed." << std::endl;
     return 1;
   }
 
+  std::vector<MSTensor> model_inputs = model.GetInputs();
   auto all_files = GetAllFiles(FLAGS_dataset_path);
   if (all_files.empty()) {
     std::cout << "ERROR: no input data." << std::endl;
     return 1;
   }
 
   std::map<double, double> costTime_map;
   size_t size = all_files.size();
-  MindDataEager SingleOp({DvppDecodeResizeJpeg({608, 608})});
+  Execute preprocess(std::shared_ptr<DvppDecodeResizeJpeg>(new DvppDecodeResizeJpeg({608, 608})));
   for (size_t i = 0; i < size; ++i) {
     struct timeval start = {0};
     struct timeval end = {0};
     double startTime_ms;
     double endTime_ms;
-    std::vector<Buffer> inputs;
-    std::vector<Buffer> outputs;
+    std::vector<MSTensor> inputs;
+    std::vector<MSTensor> outputs;
     std::cout << "Start predict input files:" << all_files[i] << std::endl;
-    auto imgDvpp = SingleOp(ReadFileToTensor(all_files[i]));
-    std::vector<float> input_shape = {608, 608};
 
+    auto img = MSTensor();
+    ret = preprocess(ReadFileToTensor(all_files[i]), &img);
+    if (ret != kSuccess) {
+      std::cout << "preprocess " << all_files[i] << " failed." << std::endl;
+      return 1;
+    }
+
+    std::vector<float> input_shape = {608, 608};
     inputs.clear();
-    inputs.emplace_back(imgDvpp->Data(), imgDvpp->DataSize());
-    inputs.emplace_back(input_shape.data(), input_shape.size() * sizeof(float));
+    inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
+                        img.Data().get(), img.DataSize());
+    inputs.emplace_back(model_inputs[1].Name(), model_inputs[1].DataType(), model_inputs[1].Shape(),
+                        input_shape.data(), input_shape.size() * sizeof(float));
+
     gettimeofday(&start, NULL);
     ret = model.Predict(inputs, &outputs);
     gettimeofday(&end, NULL);
-    if (ret != SUCCESS) {
+    if (ret != kSuccess) {
       std::cout << "Predict " << all_files[i] << " failed." << std::endl;
       return 1;
     }
     startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
     endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
     costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms));
     WriteResult(all_files[i], outputs);
   }
   double average = 0.0;
   int infer_cnt = 0;
   char tmpCh[256] = {0};
   for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
     double diff = 0.0;
     diff = iter->second - iter->first;
     average += diff;
     infer_cnt++;
   }
   average = average/infer_cnt;
   snprintf(tmpCh, sizeof(tmpCh), "NN inference cost average time: %4.3f ms of infer_count %d \n", average, infer_cnt);
   std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << infer_cnt << std::endl;
   std::string file_name = "./time_Result" + std::string("/test_perform_static.txt");
   std::ofstream file_stream(file_name.c_str(), std::ios::trunc);
   file_stream << tmpCh;
   file_stream.close();
   costTime_map.clear();
   return 0;
 }
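
The control flow of main() is unchanged; what this hunk rewires is how the model is configured. Device selection moves from the fluent singleton (Context::Instance().SetDeviceTarget("Ascend310").SetDeviceID(...)) to static GlobalContext calls, the context is attached at construction time rather than at build time, and Build() no longer takes an options map. Condensed, the new setup path reads as below (a sketch only; the patch's error logging is elided):

    GlobalContext::SetGlobalDeviceTarget(mindspore::kDeviceTypeAscend310);
    GlobalContext::SetGlobalDeviceID(FLAGS_device_id);
    auto graph = Serialization::LoadModel(FLAGS_mindir_path, ModelType::kMindIR);
    auto model_context = std::make_shared<Context>();  // ModelContext::Set* calls go here
    Model model(GraphCell(graph), model_context);
    if (model.Build() != kSuccess) {
      return 1;  // the patch prints "Build failed." first
    }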
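
Preprocessing and input assembly follow the same pattern. The old MindDataEager op returned the decoded tensor directly; dataset::Execute is Status-returning and writes into a caller-provided MSTensor, which is why the loop gains its own error branch. Inputs are also no longer pushed as raw (data, size) buffers: each MSTensor is rebuilt with the name, data type, and shape reported by model.GetInputs(), which is what the new model_inputs vector is for. Per image, the new loop body reduces to the following (sketch; "file" stands in for all_files[i], and the Execute op is constructed once, outside the loop):

    Execute preprocess(std::shared_ptr<DvppDecodeResizeJpeg>(new DvppDecodeResizeJpeg({608, 608})));
    auto img = MSTensor();
    if (preprocess(ReadFileToTensor(file), &img) != kSuccess) {
      return 1;  // logs "preprocess <file> failed."
    }
    std::vector<float> input_shape = {608, 608};  // this model's DVPP output size
    inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
                        img.Data().get(), img.DataSize());
    inputs.emplace_back(model_inputs[1].Name(), model_inputs[1].DataType(), model_inputs[1].Shape(),
                        input_shape.data(), input_shape.size() * sizeof(float));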