support C++ inference shared library on Windows (#24672)
* add SetCommandLineOption
* add the print_FLAGS function
* remove the test demo
* move the macro definition
* add the 'WITH_STATIC_LIB' option on Windows
* modify the PD_INFER_DECL macro
* modify the function name
* modify the unittest
* modify the code style
parent 23d253e1be
commit 126d3d693b
@@ -0,0 +1,27 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#if defined(_WIN32)
#ifndef PD_INFER_DECL
#ifdef PADDLE_DLL_INFERENCE
#define PD_INFER_DECL __declspec(dllexport)
#else
#define PD_INFER_DECL __declspec(dllimport)
#endif  // PADDLE_DLL_INFERENCE
#endif  // PD_INFER_DECL
#else
#define PD_INFER_DECL __attribute__((visibility("default")))
#endif  // _WIN32
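The header above is the heart of the change: PD_INFER_DECL expands to __declspec(dllexport) while building the Windows inference DLL (when PADDLE_DLL_INFERENCE is defined), to __declspec(dllimport) in client code, and to default symbol visibility elsewhere. A minimal sketch of how a public API symbol would use it; the include path and class name here are assumptions for illustration, not part of this commit:

#include "paddle/fluid/inference/api/paddle_infer_declare.h"  // assumed path

// Hypothetical exported class: dllexport when building the DLL,
// dllimport in client code, default visibility on Linux/macOS.
class PD_INFER_DECL ExamplePredictor {
 public:
  void Run();
};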
@@ -0,0 +1,89 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/utils/singleton.h"

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

struct Record {
  std::vector<float> data;
  std::vector<int32_t> shape;
};

Record ProcessALine(const std::string &line) {
  VLOG(3) << "process a line";
  std::vector<std::string> columns;
  split(line, '\t', &columns);
  CHECK_EQ(columns.size(), 2UL)
      << "data format error, should be <data>\t<shape>";

  Record record;
  std::vector<std::string> data_strs;
  split(columns[0], ' ', &data_strs);
  for (auto &d : data_strs) {
    record.data.push_back(std::stof(d));
  }

  std::vector<std::string> shape_strs;
  split(columns[1], ' ', &shape_strs);
  for (auto &s : shape_strs) {
    record.shape.push_back(std::stoi(s));
  }
  VLOG(3) << "data size " << record.data.size();
  VLOG(3) << "data shape size " << record.shape.size();
  return record;
}
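For reference, ProcessALine above expects each line of the input file to hold space-separated float values, then a tab, then space-separated shape dimensions. A hypothetical call with made-up data:

// Parses "<data>\t<shape>" into a Record (values are illustrative).
Record r = ProcessALine("0.1 0.2 0.3 0.4 0.5 0.6\t1 2 3");
// r.data  -> {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f}
// r.shape -> {1, 2, 3}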
TEST(test_paddle_tensor, paddle_tensor) {
  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
  AnalysisConfig config;

  const std::vector<std::string> passes;
  PaddlePassBuilder testPassBuilder(passes);

  config.SetModel(FLAGS_infer_model + "/__model__",
                  FLAGS_infer_model + "/__params__");

  predictor = CreatePaddlePredictor<NativeConfig>(config.ToNativeConfig());
  analysis_predictor = CreatePaddlePredictor(config);

  // Just a single batch of data.
  std::string line;
  std::ifstream file(FLAGS_infer_data);
  std::getline(file, line);
  auto record = ProcessALine(line);
  file.close();

  // Inference.
  PaddleTensor input;
  input.shape = record.shape;
  input.data =
      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
  input.dtype = PaddleDType::FLOAT32;

  std::vector<PaddleTensor> output, analysis_output;
  predictor->Run({input}, &output, 1);

  analysis_predictor->Run({input}, &analysis_output, 1);
}

}  // namespace inference
}  // namespace paddle
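The test above runs the same input through both the native and the analysis predictor but stops short of comparing their results. A hedged sketch of an element-wise check that could follow the two Run calls, assuming each produces a single FLOAT32 output tensor of equal length (the tolerance is an assumption):

ASSERT_EQ(output.size(), analysis_output.size());
const float *out = static_cast<const float *>(output[0].data.data());
const float *ref = static_cast<const float *>(analysis_output[0].data.data());
size_t n = output[0].data.length() / sizeof(float);
for (size_t i = 0; i < n; ++i) {
  EXPECT_NEAR(out[i], ref[i], 1e-5);  // hypothetical tolerance
}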
@@ -0,0 +1,65 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/utils/singleton.h"

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

TEST(test_zerocopy_tensor, zerocopy_tensor) {
  AnalysisConfig config;
  config.SetModel(FLAGS_infer_model + "/__model__",
                  FLAGS_infer_model + "/__params__");
  config.SwitchUseFeedFetchOps(false);

  auto predictor = CreatePaddlePredictor(config);
  int batch_size = 1;
  int channels = 1;
  int height = 48;
  int width = 512;
  int nums = batch_size * channels * height * width;

  float* input = new float[nums];
  for (int i = 0; i < nums; ++i) input[i] = 0;
  auto input_names = predictor->GetInputNames();
  PaddlePlace p = PaddlePlace::kCPU;
  PaddlePlace* place = &p;
  int size;

  auto input_t = predictor->GetInputTensor(input_names[0]);
  input_t->Reshape({batch_size, channels, height, width});
  input_t->copy_from_cpu<float>(input);
  input_t->data<float>(place, &size);
  input_t->mutable_data<float>(p);

  predictor->ZeroCopyRun();

  std::vector<float> out_data;
  auto output_names = predictor->GetOutputNames();
  auto output_t = predictor->GetOutputTensor(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());
  out_data.resize(out_num);
  output_t->copy_to_cpu<float>(out_data.data());
}

}  // namespace inference
}  // namespace paddle
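One caveat in the zero-copy test above: the buffer created with new float[nums] is never released. Appending delete[] input; at the end of the test, or switching the buffer to a std::vector<float> and passing input.data() to copy_from_cpu, would avoid the leak.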