parent
ec90742708
commit
1d503e6a9e
@ -0,0 +1,54 @@
|
||||
# Paddle 预测golang API
|
||||
|
||||
## 安装
|
||||
首先cmake编译时打开`-DON_INFER=ON`,在编译目录下得到`fluid_inference_c_install_dir`,将该目录移动到当前目录中并重命名为`paddle_c`
|
||||
|
||||
## 在Go中使用Paddle预测
|
||||
首先创建预测配置
|
||||
``` go
|
||||
config := paddle.NewAnalysisConfig()
|
||||
config.SetModel(model_file, params_file)
|
||||
config.SwitchUseFeedFetchOps(false)
|
||||
config.SwitchSpecifyInputNames(true)
|
||||
```
|
||||
|
||||
创建predictor
|
||||
``` go
|
||||
predictor := paddle.NewPredictor(config)
|
||||
```
|
||||
|
||||
获取输入Tensor和输出Tensor
|
||||
``` go
|
||||
inputs := predictor.GetInputTensors()
outputs := predictor.GetOutputTensors()
|
||||
```
|
||||
|
||||
设置输入数据(假设只有一个输入)
|
||||
``` go
|
||||
input := inputs[0]
|
||||
input.SetValue(data)
|
||||
input.Reshape([]int32{1, 3, 300, 300})
|
||||
```
|
||||
|
||||
运行预测
|
||||
``` go
|
||||
predictor.ZeroCopyRun()
|
||||
```
|
||||
|
||||
获取输出Tensor的真实值
|
||||
``` go
|
||||
output := outputs[0]
|
||||
predictor.GetZeroCopyOutput(output)
|
||||
value := reflect.ValueOf(output.Value())
|
||||
shape, dtype := paddle.ShapeAndTypeOf(value)
|
||||
output_data := value.Interface().([][]float32)
|
||||
```
|
||||
|
||||
## 示例
|
||||
源码见[mobilenet](./demo/mobilenet.go)
|
||||
|
||||
下载[数据](https://paddle-inference-dist.cdn.bcebos.com/mobilenet-test-model-data.tar.gz)并解压到当前目录
|
||||
|
||||
运行
|
||||
``` go
|
||||
go run ./demo/mobilenet.go
|
||||
```
|
@ -0,0 +1,81 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package main
|
||||
|
||||
import "../paddle"
|
||||
import "strings"
|
||||
import "io/ioutil"
|
||||
import "strconv"
|
||||
import "reflect"
|
||||
|
||||
func main() {
|
||||
config := paddle.NewAnalysisConfig()
|
||||
config.SetModel("data/model/__model__", "data/model/__params__")
|
||||
config.DisableGlogInfo()
|
||||
config.SwitchUseFeedFetchOps(false)
|
||||
config.SwitchSpecifyInputNames(true)
|
||||
|
||||
predictor := paddle.NewPredictor(config)
|
||||
|
||||
println("============== paddle inference ==============")
|
||||
println("input num: ", predictor.GetInputNum())
|
||||
println("input name: ", predictor.GetInputNames()[0])
|
||||
println("output num: ", predictor.GetOutputNum())
|
||||
println("output name: ", predictor.GetInputNames()[0])
|
||||
println("============== run inference =================")
|
||||
|
||||
input := predictor.GetInputTensors()[0]
|
||||
output := predictor.GetOutputTensors()[0]
|
||||
|
||||
filename := "data/data.txt"
|
||||
data := ReadData(filename)
|
||||
input.SetValue(data[:1 * 3 * 300 * 300])
|
||||
input.Reshape([]int32{1, 3, 300, 300})
|
||||
|
||||
predictor.SetZeroCopyInput(input)
|
||||
predictor.ZeroCopyRun()
|
||||
predictor.GetZeroCopyOutput(output)
|
||||
|
||||
println("============= parse output ===================")
|
||||
output_val := output.Value()
|
||||
value := reflect.ValueOf(output_val)
|
||||
shape, dtype := paddle.ShapeAndTypeOf(value)
|
||||
switch dtype {
|
||||
case paddle.PaddleDType(paddle.FLOAT32):
|
||||
v := value.Interface().([][]float32)
|
||||
println("v: ", v[0][0], v[0][1], "...")
|
||||
case paddle.PaddleDType(paddle.UINT8):
|
||||
v := value.Interface().([][]uint8)
|
||||
println("v: ", v[0][0], v[0][1], "...")
|
||||
case paddle.PaddleDType(paddle.INT32):
|
||||
v := value.Interface().([][]int32)
|
||||
println("v: ", v[0][0], v[0][1], "...")
|
||||
case paddle.PaddleDType(paddle.INT64):
|
||||
v := value.Interface().([][]int64)
|
||||
println("v: ", v[0][0], v[0][1], "...")
|
||||
}
|
||||
println(shape[0], shape[1])
|
||||
println(output.Shape()[0])
|
||||
}
|
||||
|
||||
// ReadData reads filename and parses its space-separated tokens as
// float32 values. Errors are deliberately ignored to match the demo's
// best-effort behavior: an unreadable file yields a single 0 value and
// an unparseable token is appended as 0.
func ReadData(filename string) []float32 {
	raw, _ := ioutil.ReadFile(filename)
	tokens := strings.Split(string(raw), " ")
	result := make([]float32, 0, len(tokens))
	for _, tok := range tokens {
		v, _ := strconv.ParseFloat(tok, 32)
		result = append(result, float32(v))
	}
	return result
}
|
@ -0,0 +1,72 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <paddle_c_api.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
void SetConfig(PD_AnalysisConfig *);
|
||||
void ReadData(float *data, int size);
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
PD_AnalysisConfig *config = PD_NewAnalysisConfig();
|
||||
SetConfig(config);
|
||||
PD_Predictor *predictor = PD_NewPredictor(config);
|
||||
|
||||
int input_num = PD_GetInputNum(predictor);
|
||||
printf("Input num: %d\n", input_num);
|
||||
int output_num = PD_GetOutputNum(predictor);
|
||||
printf("Output num: %d\n", output_num);
|
||||
|
||||
PD_ZeroCopyTensor input;
|
||||
input.name = const_cast<char *>(PD_GetInputName(predictor, 0)); // NOLINT
|
||||
input.data.capacity = sizeof(float) * 1 * 3 * 300 * 300;
|
||||
input.data.length = input.data.capacity;
|
||||
input.data.data = malloc(input.data.capacity);
|
||||
int shape[] = {1, 3, 300, 300};
|
||||
input.shape.data = static_cast<int *>(shape);
|
||||
input.shape.capacity = sizeof(shape);
|
||||
input.shape.length = sizeof(shape);
|
||||
input.dtype = PD_FLOAT32;
|
||||
ReadData((float *)input.data.data, 1 * 3 * 300 * 300); // NOLINT
|
||||
float *data = (float *)input.data.data; // NOLINT
|
||||
PD_SetZeroCopyInput(predictor, &input);
|
||||
int *shape_ptr = (int *)input.shape.data; // NOLINT
|
||||
|
||||
PD_ZeroCopyRun(predictor);
|
||||
PD_ZeroCopyTensor output;
|
||||
PD_InitZeroCopyTensor(&output);
|
||||
output.name = const_cast<char *>(PD_GetOutputName(predictor, 0)); // NOLINT
|
||||
PD_GetZeroCopyOutput(predictor, &output);
|
||||
PD_DestroyZeroCopyTensor(&output);
|
||||
|
||||
PD_DeleteAnalysisConfig(config);
|
||||
PD_DeletePredictor(predictor);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// SetConfig prepares the analysis config for the demo: points it at the
// bundled mobilenet model, disables feed/fetch ops and enables named
// inputs (the mode main() uses for its zero-copy tensors), and silences
// glog INFO output.
void SetConfig(PD_AnalysisConfig *config) {
  PD_SetModel(config, "data/model/__model__", "data/model/__params__");
  PD_SwitchUseFeedFetchOps(config, false);
  PD_SwitchSpecifyInputNames(config, true);
  PD_DisableGlogInfo(config);
  // PD_SwitchIrOptim(config, false);
}
|
||||
|
||||
// ReadData fills data[0..n) with floats parsed from data/data.txt.
// On failure the buffer is left (partially) untouched.
void ReadData(float *data, int n) {
  FILE *fp = fopen("data/data.txt", "r");
  // BUG FIX: the original dereferenced fp without checking for NULL,
  // crashing when the data file is missing.
  if (fp == NULL) {
    fprintf(stderr, "ReadData: cannot open data/data.txt\n");
    return;
  }
  for (int i = 0; i < n; i++) {
    // Stop early on a short or malformed file instead of looping on EOF.
    if (fscanf(fp, "%f", &data[i]) != 1) {
      break;
    }
  }
  fclose(fp);
}
|
@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <paddle_inference_api.h>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
void SetConfig(paddle::AnalysisConfig *);
|
||||
|
||||
// Runs one inference through the Paddle C++ API: reads data/data.txt
// into a 1x3x300x300 float input, executes the predictor, and fetches
// the first output tensor.
int main(int argc, char *argv[]) {
  paddle::AnalysisConfig config;
  SetConfig(&config);
  auto predictor = paddle::CreatePaddlePredictor(config);
  auto input_name = predictor->GetInputNames()[0];
  auto input = predictor->GetInputTensor(input_name);
  std::cout << predictor->GetOutputNames()[0] << std::endl;
  std::vector<int> shape{1, 3, 300, 300};
  input->Reshape(std::move(shape));
  std::vector<float> data(1 * 300 * 300 * 3);
  std::ifstream fin("data/data.txt");
  // size_t index avoids the signed/unsigned comparison of the original.
  for (size_t i = 0; i < data.size(); i++) {
    fin >> data[i];
  }

  input->copy_from_cpu(data.data());
  predictor->ZeroCopyRun();
  auto output_name = predictor->GetOutputNames()[0];
  // BUG FIX: 'output' was assigned without ever being declared, which
  // does not compile; declare it with auto.
  auto output = predictor->GetOutputTensor(output_name);
  return 0;
}
|
||||
|
||||
// SetConfig prepares the analysis config for the demo: bundled
// mobilenet model paths, zero-copy mode, and IR optimization off.
void SetConfig(paddle::AnalysisConfig *config) {
  config->SetModel("data/model/__model__", "data/model/__params__");
  // BUG FIX: main() drives the predictor through the zero-copy tensor
  // API (GetInputTensor / ZeroCopyRun), which requires feed/fetch ops
  // to be disabled; every other demo in this change passes false here.
  config->SwitchUseFeedFetchOps(false);
  config->SwitchSpecifyInputNames(true);
  config->SwitchIrOptim(false);
}
|
@ -0,0 +1,42 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package paddle
|
||||
|
||||
// #cgo CFLAGS: -Ipaddle_c/paddle/include
|
||||
// #cgo LDFLAGS: -Lpaddle_c/paddle/lib -lpaddle_fluid_c
|
||||
// #include <stdbool.h>
|
||||
// #include <paddle_c_api.h>
|
||||
import "C"
|
||||
import "fmt"
|
||||
|
||||
func ConvertCBooleanToGo(b C.bool) bool {
|
||||
var c_false C.bool
|
||||
if b != c_false {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// numel returns the number of elements a tensor of the given shape
// holds: the product of all dimensions, and 1 for an empty shape.
func numel(shape []int32) int32 {
	prod := int32(1)
	for i := range shape {
		prod *= shape[i]
	}
	return prod
}
|
||||
|
||||
// bug builds an internal-error value whose message is the formatted
// input prefixed with "Bug ".
func bug(format string, args ...interface{}) error {
	msg := fmt.Sprintf(format, args...)
	return fmt.Errorf("Bug %v", msg)
}
|
@ -0,0 +1,189 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package paddle
|
||||
|
||||
// #cgo CFLAGS: -Ipaddle_c/paddle/include
|
||||
// #cgo LDFLAGS: -Lpaddle_c/paddle/lib -lpaddle_fluid_c
|
||||
// #include <stdbool.h>
|
||||
// #include <stdlib.h>
|
||||
// #include <paddle_c_api.h>
|
||||
import "C"
|
||||
|
||||
import "runtime"
|
||||
import "unsafe"
|
||||
|
||||
// AnalysisConfig wraps the C PD_AnalysisConfig handle used to configure
// a Paddle predictor.
type AnalysisConfig struct {
	c *C.PD_AnalysisConfig
}

// NewAnalysisConfig allocates a fresh config on the C side and ties the
// handle's lifetime to the returned Go value via a finalizer.
func NewAnalysisConfig() *AnalysisConfig {
	c_config := C.PD_NewAnalysisConfig()
	config := &AnalysisConfig{c: c_config}
	runtime.SetFinalizer(config, (*AnalysisConfig).finalize)
	return config
}

// finalize releases the underlying C config; invoked by the GC.
func (config *AnalysisConfig) finalize() {
	C.PD_DeleteAnalysisConfig(config.c)
}
|
||||
|
||||
// SetModel records the paths of the model program file and the
// parameters file. The Go strings are copied into C strings that are
// freed once the C call returns.
func (config *AnalysisConfig) SetModel(model, params string) {
	//C.printString((*C.char)(unsafe.Pointer(&s[0])))
	c_model := C.CString(model)
	defer C.free(unsafe.Pointer(c_model))
	c_params := C.CString(params)
	defer C.free(unsafe.Pointer(c_params))

	C.PD_SetModel(config.c, c_model, c_params)
}

// ModelDir returns the model directory recorded in the config.
func (config *AnalysisConfig) ModelDir() string {
	return C.GoString(C.PD_ModelDir(config.c))
}

// ProgFile returns the path of the model program file.
func (config *AnalysisConfig) ProgFile() string {
	return C.GoString(C.PD_ProgFile(config.c))
}

// ParamsFile returns the path of the model parameters file.
func (config *AnalysisConfig) ParamsFile() string {
	return C.GoString(C.PD_ParamsFile(config.c))
}
|
||||
|
||||
// EnableUseGpu turns on GPU execution with the given initial memory
// pool size in MB and CUDA device id.
func (config *AnalysisConfig) EnableUseGpu(memory_pool_init_size_mb uint64, device_id int) {
	C.PD_EnableUseGpu(config.c, C.ulong(memory_pool_init_size_mb), C.int(device_id))
}

// DisableGpu forces CPU execution.
func (config *AnalysisConfig) DisableGpu() {
	C.PD_DisableGpu(config.c)
}

// UseGpu reports whether GPU execution is enabled.
func (config *AnalysisConfig) UseGpu() bool {
	return ConvertCBooleanToGo(C.PD_UseGpu(config.c))
}

// GpuDeviceId returns the configured CUDA device id.
func (config *AnalysisConfig) GpuDeviceId() int {
	return int(C.PD_GpuDeviceId(config.c))
}

// MemoryPoolInitSizeMb returns the initial memory pool size in MB.
func (config *AnalysisConfig) MemoryPoolInitSizeMb() int {
	return int(C.PD_MemoryPoolInitSizeMb(config.c))
}

// EnableCudnn enables the cuDNN backend.
func (config *AnalysisConfig) EnableCudnn() {
	C.PD_EnableCUDNN(config.c)
}

// CudnnEnabled reports whether the cuDNN backend is enabled.
func (config *AnalysisConfig) CudnnEnabled() bool {
	return ConvertCBooleanToGo(C.PD_CudnnEnabled(config.c))
}
|
||||
|
||||
// SwitchIrOptim toggles IR graph optimization passes.
func (config *AnalysisConfig) SwitchIrOptim(x bool) {
	C.PD_SwitchIrOptim(config.c, C.bool(x))
}

// IrOptim reports whether IR optimization is enabled.
func (config *AnalysisConfig) IrOptim() bool {
	return ConvertCBooleanToGo(C.PD_IrOptim(config.c))
}

// SwitchUseFeedFetchOps toggles the built-in feed/fetch ops. The demos
// in this package pass false before using the zero-copy tensor API.
func (config *AnalysisConfig) SwitchUseFeedFetchOps(x bool) {
	C.PD_SwitchUseFeedFetchOps(config.c, C.bool(x))
}

// UseFeedFetchOpsEnabled reports whether feed/fetch ops are enabled.
func (config *AnalysisConfig) UseFeedFetchOpsEnabled() bool {
	return ConvertCBooleanToGo(C.PD_UseFeedFetchOpsEnabled(config.c))
}

// SwitchSpecifyInputNames toggles addressing inputs by name.
func (config *AnalysisConfig) SwitchSpecifyInputNames(x bool) {
	C.PD_SwitchSpecifyInputNames(config.c, C.bool(x))
}

// SpecifyInputName reports whether inputs are addressed by name.
func (config *AnalysisConfig) SpecifyInputName() bool {
	return ConvertCBooleanToGo(C.PD_SpecifyInputName(config.c))
}
|
||||
|
||||
//func (config *AnalysisConfig) EnableTensorRtEngine(workspace_size int)
|
||||
|
||||
// TensorrtEngineEnabled reports whether the TensorRT engine is enabled.
func (config *AnalysisConfig) TensorrtEngineEnabled() bool {
	return ConvertCBooleanToGo(C.PD_TensorrtEngineEnabled(config.c))
}

// SwitchIrDebug toggles IR debugging in the underlying config.
func (config *AnalysisConfig) SwitchIrDebug(x bool) {
	C.PD_SwitchIrDebug(config.c, C.bool(x))
}

// EnableNgraph enables the nGraph backend.
func (config *AnalysisConfig) EnableNgraph() {
	C.PD_EnableNgraph(config.c)
}

// NgraphEnabled reports whether the nGraph backend is enabled.
func (config *AnalysisConfig) NgraphEnabled() bool {
	return ConvertCBooleanToGo(C.PD_NgraphEnabled(config.c))
}

// EnableMkldnn enables the MKL-DNN backend.
func (config *AnalysisConfig) EnableMkldnn() {
	C.PD_EnableMKLDNN(config.c)
}

// SetCpuMathLibraryNumThreads sets the CPU math library thread count.
func (config *AnalysisConfig) SetCpuMathLibraryNumThreads(n int) {
	C.PD_SetCpuMathLibraryNumThreads(config.c, C.int(n))
}

// CpuMathLibraryNumThreads returns the CPU math library thread count.
func (config *AnalysisConfig) CpuMathLibraryNumThreads() int {
	return int(C.PD_CpuMathLibraryNumThreads(config.c))
}

// EnableMkldnnQuantizer enables the MKL-DNN quantizer.
func (config *AnalysisConfig) EnableMkldnnQuantizer() {
	C.PD_EnableMkldnnQuantizer(config.c)
}

// MkldnnQuantizerEnabled reports whether the MKL-DNN quantizer is
// enabled.
func (config *AnalysisConfig) MkldnnQuantizerEnabled() bool {
	return ConvertCBooleanToGo(C.PD_MkldnnQuantizerEnabled(config.c))
}
|
||||
|
||||
// SetModelBuffer
|
||||
// ModelFromMemory
|
||||
|
||||
// EnableMemoryOptim enables memory optimization for inference.
func (config *AnalysisConfig) EnableMemoryOptim() {
	C.PD_EnableMemoryOptim(config.c)
}

// MemoryOptimEnabled reports whether memory optimization is enabled.
func (config *AnalysisConfig) MemoryOptimEnabled() bool {
	return ConvertCBooleanToGo(C.PD_MemoryOptimEnabled(config.c))
}

// EnableProfile enables the built-in profiler.
func (config *AnalysisConfig) EnableProfile() {
	C.PD_EnableProfile(config.c)
}

// ProfileEnabled reports whether the profiler is enabled.
func (config *AnalysisConfig) ProfileEnabled() bool {
	return ConvertCBooleanToGo(C.PD_ProfileEnabled(config.c))
}

// DisableGlogInfo suppresses glog INFO-level logging.
func (config *AnalysisConfig) DisableGlogInfo() {
	C.PD_DisableGlogInfo(config.c)
}

// DeletePass removes the named optimization pass. The Go string is
// copied to a C string that is freed after the call.
func (config *AnalysisConfig) DeletePass(pass string) {
	c_pass := C.CString(pass)
	defer C.free(unsafe.Pointer(c_pass))
	C.PD_DeletePass(config.c, c_pass)
}

// SetInValid marks the config as invalid.
func (config *AnalysisConfig) SetInValid() {
	C.PD_SetInValid(config.c)
}

// IsValid reports whether the config is still valid.
func (config *AnalysisConfig) IsValid() bool {
	return ConvertCBooleanToGo(C.PD_IsValid(config.c))
}
|
@ -0,0 +1,115 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package paddle
|
||||
|
||||
// #cgo CFLAGS: -Ipaddle_c/paddle/include
|
||||
// #cgo LDFLAGS: -Lpaddle_c/paddle/lib -lpaddle_fluid_c
|
||||
// #include <stdbool.h>
|
||||
// #include "paddle_c_api.h"
|
||||
import "C"
|
||||
|
||||
import "reflect"
|
||||
import "runtime"
|
||||
import "unsafe"
|
||||
|
||||
// Predictor wraps the C PD_Predictor handle that executes inference.
type Predictor struct {
	c *C.PD_Predictor
}

// NewPredictor builds a predictor from config and ties the C handle's
// lifetime to the returned Go value via a finalizer.
func NewPredictor(config *AnalysisConfig) *Predictor {
	c_predictor := C.PD_NewPredictor((*config).c)
	predictor := &Predictor{c: c_predictor}
	runtime.SetFinalizer(predictor, (*Predictor).finalize)
	return predictor
}

// finalize releases the underlying C predictor; invoked by the GC.
func (predictor *Predictor) finalize() {
	C.PD_DeletePredictor(predictor.c)
}

// DeletePredictor releases the C predictor immediately rather than
// waiting for the finalizer.
func DeletePredictor(predictor *Predictor) {
	C.PD_DeletePredictor(predictor.c)
}
|
||||
|
||||
// GetInputNum returns the number of model inputs.
func (predictor *Predictor) GetInputNum() int {
	return int(C.PD_GetInputNum(predictor.c))
}

// GetOutputNum returns the number of model outputs.
func (predictor *Predictor) GetOutputNum() int {
	return int(C.PD_GetOutputNum(predictor.c))
}

// GetInputName returns the name of the n-th model input.
func (predictor *Predictor) GetInputName(n int) string {
	return C.GoString(C.PD_GetInputName(predictor.c, C.int(n)))
}

// GetOutputName returns the name of the n-th model output.
func (predictor *Predictor) GetOutputName(n int) string {
	return C.GoString(C.PD_GetOutputName(predictor.c, C.int(n)))
}
||||
|
||||
// GetInputTensors returns one fresh zero-copy tensor per model input,
// each pre-named with the corresponding input name. Only the name is
// filled in; callers set data and shape before running.
func (predictor *Predictor) GetInputTensors() [](*ZeroCopyTensor) {
	var result [](*ZeroCopyTensor)
	for i := 0; i < predictor.GetInputNum(); i++ {
		tensor := NewZeroCopyTensor()
		tensor.c.name = C.PD_GetInputName(predictor.c, C.int(i))
		result = append(result, tensor)
	}
	return result
}
|
||||
|
||||
// GetOutputTensors returns one fresh zero-copy tensor per model output,
// each pre-named with the corresponding output name. Only the name is
// filled in; GetZeroCopyOutput populates data and shape after a run.
func (predictor *Predictor) GetOutputTensors() [](*ZeroCopyTensor) {
	var result [](*ZeroCopyTensor)
	for i := 0; i < predictor.GetOutputNum(); i++ {
		tensor := NewZeroCopyTensor()
		tensor.c.name = C.PD_GetOutputName(predictor.c, C.int(i))
		result = append(result, tensor)
	}
	return result
}
|
||||
|
||||
// GetInputNames returns the names of all model inputs, in index order.
func (predictor *Predictor) GetInputNames() []string {
	names := make([]string, predictor.GetInputNum())
	for i := 0; i < len(names); i++ {
		names[i] = predictor.GetInputName(i)
	}
	return names
}
|
||||
|
||||
func (predictor *Predictor) GetOutputNames() []string {
|
||||
names := make([]string, predictor.GetInputNum())
|
||||
for i := 0; i < len(names); i++ {
|
||||
names[i] = predictor.GetOutputName(i)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// SetZeroCopyInput binds tensor as an input of the predictor without
// copying its data buffer.
func (predictor *Predictor) SetZeroCopyInput(tensor *ZeroCopyTensor) {
	C.PD_SetZeroCopyInput(predictor.c, tensor.c)
}

// GetZeroCopyOutput fetches the output matching tensor's name into the
// C-side struct, then refreshes the Go-side name and shape from it.
func (predictor *Predictor) GetZeroCopyOutput(tensor *ZeroCopyTensor) {
	C.PD_GetZeroCopyOutput(predictor.c, tensor.c)
	tensor.name = C.GoString(tensor.c.name)
	// Build an []int32 view over the C shape buffer without copying.
	// NOTE(review): this slice aliases memory owned by tensor.c and is
	// only valid for the Reshape call below; it must not escape.
	var shape []int32
	shape_hdr := (*reflect.SliceHeader)(unsafe.Pointer(&shape))
	shape_hdr.Data = uintptr(unsafe.Pointer(tensor.c.shape.data))
	shape_hdr.Len = int(tensor.c.shape.length / C.sizeof_int)
	shape_hdr.Cap = int(tensor.c.shape.length / C.sizeof_int)
	tensor.Reshape(shape)
}
|
||||
|
||||
// ZeroCopyRun executes one inference pass over the tensors previously
// bound with SetZeroCopyInput.
func (predictor *Predictor) ZeroCopyRun() {
	C.PD_ZeroCopyRun(predictor.c)
}
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in new issue