parent 607080e888
commit c6dcffc61a
@@ -0,0 +1,21 @@
#ifdef _WIN32
#ifdef inference_icnet_EXPORTS
#define API_REFERENCE extern "C" __declspec(dllexport)
#else
#define API_REFERENCE extern "C" __declspec(dllimport)
#endif
#else
#define API_REFERENCE
#endif

// API_REFERENCE void *init_predictor();
// API_REFERENCE void destory_predictor(void *handle);
// API_REFERENCE void predict(void *handle, int n);

API_REFERENCE void *init_predictor(const char *prog_file,
                                    const char *param_file,
                                    const float fraction_of_gpu_memory,
                                    const bool use_gpu, const int device);
API_REFERENCE void predict(void *handle, float *input, const int channel,
                           const int height, const int width,
                           int64_t **output, int *output_length,
                           int batch_size);
API_REFERENCE void destory_predictor(void *handle);
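For context, a minimal caller sketch for the exported C API above. This is not part of the commit; the model paths, the 3 x 449 x 581 input shape, and the byte-count reading of *output_length are assumptions taken from the test programs later in this diff, and error handling is omitted.

#include <cstdint>
#include <vector>
#include "inference_icnet.h"

int main() {
  // Assumed model location, mirroring the tests below.
  void *h = init_predictor("./lb/__model__", "./lb/__params__",
                           0.3f, /*use_gpu=*/true, /*device=*/0);

  const int channel = 3, height = 449, width = 581;
  std::vector<float> input(channel * height * width, 0.0f);  // caller-owned input buffer
  std::vector<int64_t> output(height * width, 0);            // caller-owned output buffer
  int64_t *out_ptr = output.data();
  int out_len = 0;  // the tests below treat this as a byte count after predict() returns

  predict(h, input.data(), channel, height, width, &out_ptr, &out_len, /*batch_size=*/1);

  destory_predictor(h);
  return 0;
}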
@@ -0,0 +1,123 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#define GOOGLE_GLOG_DLL_DECL
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <chrono>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

namespace paddle {

// DEFINE_string(dirname, "./lb",
//               "Directory of the inference model.");

NativeConfig GetConfig() {
  NativeConfig config;
  // config.model_dir = FLAGS_dirname;
  config.prog_file = "lb/__model__";
  config.param_file = "lb/__params__";
  config.fraction_of_gpu_memory = 0.8;
  config.use_gpu = true;
  config.device = 0;
  return config;
}

using Time = decltype(std::chrono::high_resolution_clock::now());
Time time() { return std::chrono::high_resolution_clock::now(); }
double time_diff(Time t1, Time t2) {
  typedef std::chrono::microseconds ms;
  auto diff = t2 - t1;
  ms counter = std::chrono::duration_cast<ms>(diff);
  return counter.count() / 1000.0;
}

void test_naive(int batch_size) {
  NativeConfig config = GetConfig();
  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
  int height = 449;
  int width = 581;

  // ============= read file list =============
  std::ifstream infile("new_file.list");
  std::string temp_s;
  std::vector<std::string> all_files;
  // Extraction in the loop condition avoids pushing the last entry twice on EOF.
  while (infile >> temp_s) {
    all_files.push_back(temp_s);
  }

  // size_t file_num = all_files.size();
  infile.close();
  // ============= read file list =============
  for (size_t f_k = 0; f_k < 1; f_k++) {
    std::ifstream in_img(all_files[f_k]);
    std::cout << all_files[f_k] << std::endl;
    float temp_v;

    float sum_n = 0.0;
    std::vector<float> data;
    while (in_img >> temp_v) {
      data.push_back(temp_v);
      // std::cout << temp_v << " ";
      sum_n += temp_v;
    }

    in_img.close();
    std::cout << "sum: " << sum_n << std::endl;

    PaddleTensor tensor;
    tensor.shape = std::vector<int>({batch_size, 3, height, width});
    tensor.data.Resize(sizeof(float) * batch_size * 3 * height * width);
    std::copy(data.begin(), data.end(), static_cast<float*>(tensor.data.data()));
    tensor.dtype = PaddleDType::FLOAT32;
    std::vector<PaddleTensor> paddle_tensor_feeds(1, tensor);
    PaddleTensor tensor_out;

    std::vector<PaddleTensor> outputs(1, tensor_out);
    // Warm-up run before the timed loop.
    predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
    std::cout << "start predict:" << std::endl;
    auto time1 = time();

    for (size_t i = 0; i < 1; i++) {
      predictor->Run(paddle_tensor_feeds, &outputs, batch_size);
    }

    auto time2 = time();
    std::ofstream ofresult("naive_test_result.txt", std::ios::app);

    std::cout << "batch: " << batch_size
              << " predict cost: " << time_diff(time1, time2) / 1000.0 << "ms"
              << std::endl;
    std::cout << outputs.size() << std::endl;
    int64_t* data_o = static_cast<int64_t*>(outputs[0].data.data());
    int64_t sum_out = 0;
    for (size_t j = 0; j < outputs[0].data.length() / sizeof(int64_t); ++j) {
      ofresult << std::to_string(data_o[j]) << " ";
      sum_out += data_o[j];
    }
    std::cout << "sum_out " << sum_out << std::endl;
    ofresult << std::endl;
    ofresult.close();
  }
}

}  // namespace paddle

int main(int argc, char** argv) {
  // google::ParseCommandLineFlags(&argc, &argv, true);
  paddle::test_naive(1 << 0);
  return 0;
}
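Both tests read their input from plain text files: new_file.list names the per-image files, and each image file holds one float per line for a 3 x 449 x 581 CHW tensor (the Windows test below hard-codes 1.png.txt). A minimal sketch, not part of the commit, for producing such files:

#include <fstream>

int main() {
  // One float per line, matching the 3 x 449 x 581 CHW layout the tests expect.
  const int size = 3 * 449 * 581;
  std::ofstream img("1.png.txt");
  for (int i = 0; i < size; ++i) img << 0.0f << "\n";
  img.close();

  // File list consumed by both test programs.
  std::ofstream list("new_file.list");
  list << "1.png.txt" << "\n";
  return 0;
}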
@@ -0,0 +1,99 @@

#include <windows.h>
#include <fstream>
#include "inference_icnet.h"
#include <thread>
#include <vector>
#include <string>
#include <iostream>

#include <sstream>
using namespace std;

template <class Type>
Type stringToNum(const string& str) {
  istringstream iss(str);
  Type num;
  iss >> num;
  return num;
}

void test_imgs() {
  void *h = init_predictor("./lb/__model__", "./lb/__params__", 0.3f, true, 0);

  std::ifstream infile("new_file.list");
  std::ofstream ofs("./1.png.output.txt");

  std::string temp_s;
  std::vector<std::string> all_files;
  while (infile >> temp_s) {
    all_files.push_back(temp_s);
  }
  // size_t file_num = all_files.size();
  infile.close();
  // ============= read file list =============
  for (size_t f_k = 0; f_k < 1; f_k++) {
    // std::string path = "D:\\Paddle\\paddle\\fluid\\inference\\api\\demo_ci\\build\\Release\\";
    // std::ifstream in_img(path + all_files[f_k]);
    std::string mypath = "D:\\Paddle\\paddle\\fluid\\inference\\api\\demo_ci\\build\\Release\\1.png.txt";
    std::cout << "file: " << mypath << std::endl;
    std::ifstream in_img(mypath);
    // std::cout << path + all_files[f_k] << std::endl;
    const int size = 3 * 449 * 581 * 1;
    float* data = new float[size];
    std::string value;

    if (!in_img.is_open()) {
      cout << "open failed" << endl;
    }
    double sum_input = .0;
    for (int i = 0; i < size; i++) {
      getline(in_img, value, '\n');
      double v = stringToNum<double>(value);
      data[i] = static_cast<float>(v);
      sum_input += v;
    }
    std::cout << "sum_input: " << sum_input << std::endl;

    in_img.close();
    const int SIZE = 449 * 581 * 1;
    int64_t* p = new int64_t[SIZE]();  // caller-owned output buffer, zero-initialized
    int out_size = 0;
    // memset(p, 0, size);
    predict(h, data, 3, 449, 581, &p, &out_size, 1);
    std::cout << "out_size = " << out_size << std::endl;

    double out_sum = .0;
    for (size_t i = 0; i < out_size / sizeof(int64_t); i++) {
      out_sum += p[i];
      ofs << p[i] << " ";
    }
    ofs.close();

    std::cout << "inference out sum: " << out_sum << std::endl;
    delete[] p;     // array delete to match new[]
    delete[] data;
  }

  destory_predictor(h);
}

int main(int argc, char** argv) {
  // if (true) {
  //   std::thread t1(func, init_predictor("./infer_model/__model__", "./infer_model/__params__", 0.1f, true, 0));
  //   std::thread t2(func, init_predictor("./infer_model/__model__", "./infer_model/__params__", 0.1f, true, 0));
  //   // std::thread t3(func, init_predictor("./infer_model/__model__", "./infer_model/__params__", 0.1f, true, 0));
  //   // std::thread t4(func, init_predictor("./infer_model/__model__", "./infer_model/__params__", 0.1f, true, 0));
  //   t1.join();
  //   t2.join();
  //   // t3.join();
  //   // t4.join();
  //   // Sleep(1);
  // }
  test_imgs();

  return 0;
}
@@ -0,0 +1,105 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#define GOOGLE_GLOG_DLL_DECL

#include <gflags/gflags.h>
#include <glog/logging.h>
// #include <gtest/gtest.h>
#include <cassert>
#include <chrono>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include <thread>  // NOLINT

// Minimal stand-ins for the gtest macros used below.
#define ASSERT_TRUE(x) x
#define ASSERT_EQ(x, y) assert(x == y)

namespace paddle {

// DEFINE_string(dirname, "./LB_icnet_model",
//               "Directory of the inference model.");

NativeConfig GetConfig() {
  NativeConfig config;
  config.prog_file = "./dzh_lb/__model__";
  config.param_file = "./dzh_lb/__params__";
  config.fraction_of_gpu_memory = 0.08;
  config.use_gpu = true;
  config.device = 0;
  return config;
}

using Time = decltype(std::chrono::high_resolution_clock::now());
Time time() { return std::chrono::high_resolution_clock::now(); }
double time_diff(Time t1, Time t2) {
  typedef std::chrono::microseconds ms;
  auto diff = t2 - t1;
  ms counter = std::chrono::duration_cast<ms>(diff);
  return counter.count() / 1000.0;
}

void test_naive(int batch_size, std::string model_path) {
  PaddlePredictor* pres[2];

  NativeConfig config = GetConfig();
  // config.model_dir = model_path;
  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config);
  auto predictor1 = CreatePaddlePredictor<NativeConfig>(config);
  pres[0] = predictor0.get();
  pres[1] = predictor1.get();

  int height = 449;
  int width = 581;

  std::vector<float> data;
  for (int i = 0; i < 3 * height * width; i++) {
    data.push_back(0);
  }

  PaddleTensor tensor;
  tensor.shape = std::vector<int>({batch_size, 3, height, width});
  tensor.data.Resize(sizeof(float) * batch_size * 3 * height * width);
  std::copy(data.begin(), data.end(), static_cast<float*>(tensor.data.data()));
  tensor.dtype = PaddleDType::FLOAT32;
  std::vector<PaddleTensor> paddle_tensor_feeds(1, tensor);

  // Each job runs 1 batch; one job per predictor, since indexing pres[] past
  // its two entries would read out of bounds.
  constexpr int num_jobs = 2;
  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_jobs; ++tid) {
    threads.emplace_back([&, tid]() {
      auto predictor = pres[tid];
      std::vector<PaddleTensor> local_outputs;
      for (size_t i = 0; i < 1000; i++) {
        ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &local_outputs));
        std::cout << "run: " << tid << std::endl;
      }
      ASSERT_EQ(local_outputs.size(), 1UL);
    });
  }
  for (int i = 0; i < num_jobs; ++i) {
    threads[i].join();
  }
}

// TEST(alexnet, naive) {
//   test_naive(1 << 0, "./trt_models/vgg19");
// }

}  // namespace paddle

int main(int argc, char** argv) {
  paddle::test_naive(1 << 0, "");
  return 0;
}