@@ -65,13 +65,13 @@ config.model_dir = "xxx";
 config.use_gpu = false;
 // Create a native PaddlePredictor
 auto predictor =
-      paddle::CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+      paddle::CreatePaddlePredictor<paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(config);
 // Create the input tensor
 int64_t data[4] = {1, 2, 3, 4};
 paddle::PaddleTensor tensor{.name = "",
                             .shape = std::vector<int>({4, 1}),
-                            .data = PaddleBuf(data, sizeof(data)),
-                            .dtype = PaddleDType::INT64};
+                            .data = paddle::PaddleBuf(data, sizeof(data)),
+                            .dtype = paddle::PaddleDType::INT64};
 // Create the output tensors; the memory of the output tensors can be reused
 std::vector<paddle::PaddleTensor> outputs;
 // Run inference
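
For reference, below is a minimal sketch of the whole snippet after this change, with every type fully qualified under the paddle namespace. The header name paddle_inference_api.h, the Run call, and the output readback at the end are assumptions based on the legacy C++ inference API; they are not part of the hunk above.

#include <cstdint>
#include <vector>

#include "paddle_inference_api.h"  // assumed header for the legacy C++ inference API

int main() {
  // Configure a native (CPU) predictor; "xxx" is a placeholder model directory.
  paddle::NativeConfig config;
  config.model_dir = "xxx";
  config.use_gpu = false;

  // Create a native PaddlePredictor.
  auto predictor =
      paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                    paddle::PaddleEngineKind::kNative>(config);

  // Create the input tensor.
  int64_t data[4] = {1, 2, 3, 4};
  paddle::PaddleTensor tensor{.name = "",
                              .shape = std::vector<int>({4, 1}),
                              .data = paddle::PaddleBuf(data, sizeof(data)),
                              .dtype = paddle::PaddleDType::INT64};

  // Create the output tensors; their memory can be reused across calls.
  std::vector<paddle::PaddleTensor> outputs;

  // Run inference (assumed call: Run fills `outputs` from the given inputs).
  if (!predictor->Run({tensor}, &outputs)) {
    return 1;
  }

  // Read the first output buffer; float is assumed for this demo model.
  const float* result = static_cast<const float*>(outputs.front().data.data());
  (void)result;
  return 0;
}

Note that constructing a PaddleBuf from an existing pointer wraps the buffer without copying it, so data must stay alive until Run returns.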