commit b80eef79d9 (parent 6b7bb6b54a)

Dockerfile
@@ -0,0 +1,47 @@
FROM ubuntu:18.04
MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>

WORKDIR /workspace

ENV PATH /opt/python3/bin:/root/.local/bin:$PATH
ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/opt/python3/lib

# Install Python via Miniconda
ADD https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-4.7.12.1-Linux-x86_64.sh miniconda3.sh
RUN /bin/bash miniconda3.sh -b -p /opt/python3/ && \
    rm -f miniconda3.sh

# Point pip at the Aliyun mirror
RUN mkdir -p ~/.pip && \
    echo "[global]" >> ~/.pip/pip.conf && \
    echo "trusted-host = mirrors.aliyun.com" >> ~/.pip/pip.conf && \
    echo "index-url = https://mirrors.aliyun.com/pypi/simple" >> ~/.pip/pip.conf

# Point conda at the Tsinghua TUNA mirrors
RUN echo "channels:" >> ~/.condarc && \
    echo " - conda-forge" >> ~/.condarc && \
    echo " - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/" >> ~/.condarc && \
    echo " - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/" >> ~/.condarc && \
    echo " - defaults" >> ~/.condarc && \
    echo "custom_channels:" >> ~/.condarc && \
    echo " conda-forge: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo " msys2: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo " bioconda: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo " menpo: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo " pytorch: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo " simpleitk: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
    echo "show_channel_urls: true" >> ~/.condarc && \
    echo "channel_priority: strict" >> ~/.condarc

# Install R
RUN conda install -y r -c conda-forge

# Install PaddlePaddle (CPU, MKL build for Python 3.7)
RUN /opt/python3/bin/python -m pip install \
    https://paddle-wheel.bj.bcebos.com/0.0.0-cpu-mkl/paddlepaddle-0.0.0-cp37-cp37m-linux_x86_64.whl

# Install reticulate, the R interface to Python
RUN Rscript -e 'install.packages("reticulate", repos="https://cran.rstudio.com")'

# Copy the example and fetch the mobilenet test model and data
COPY example example
RUN cd example && \
    curl -O https://paddle-inference-dist.cdn.bcebos.com/mobilenet-test-model-data.tar.gz && \
    tar -zxvf mobilenet-test-model-data.tar.gz && rm mobilenet-test-model-data.tar.gz

README.md
@@ -0,0 +1,121 @@
# R support

English | [简体中文](./README_cn.md)

Use PaddlePaddle in R.

## Install

### Use Docker

Download the [`Dockerfile`](./Dockerfile), then run:

``` bash
docker build -t paddle-rapi:latest .
```

### Local installation

First, make sure `Python` is installed; the examples below assume its path is `/opt/python3.7`.

``` bash
python -m pip install paddlepaddle      # CPU version
python -m pip install paddlepaddle-gpu  # GPU version
```

Then install the R packages needed to use PaddlePaddle:

``` r
install.packages("reticulate")  # call Python from R
install.packages("RcppCNPy")    # use numpy.ndarray in R
```
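
Optionally, check that `reticulate` is bound to the intended interpreter — a quick sanity check, assuming the path above:

``` r
library(reticulate)
use_python("/opt/python3.7/bin/python3.7")
py_config()  # prints the Python interpreter and numpy that reticulate will use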
```

## Use Paddle inference in R

First, load PaddlePaddle in R:

``` r
library(reticulate)
library(RcppCNPy)

use_python("/opt/python3.7/bin/python3.7")
paddle <- import("paddle.fluid.core")
```

Create an `AnalysisConfig`, which holds the configuration of the Paddle inference engine:

``` r
config <- paddle$AnalysisConfig("")
```

Set the model path:

``` r
config$set_model("model/__model__", "model/__params__")
```
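
If the parameters are stored as separate files in one directory rather than combined into a single `__params__` file, `set_model` can also be given just the model directory. A sketch, assuming a directory `model` in PaddlePaddle's non-combined layout:

``` r
config$set_model("model")  # directory containing __model__ and one file per parameter
```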

Use zero-copy inference:

``` r
config$switch_use_feed_fetch_ops(FALSE)
config$switch_specify_input_names(TRUE)
```

Other configuration options are as follows:

``` r
config$enable_profile()                       # turn on inference profiling
config$enable_use_gpu(gpu_memory_mb, gpu_id)  # use the GPU
config$disable_gpu()                          # disable the GPU
config$gpu_device_id()                        # get the GPU device id
config$switch_ir_optim(TRUE)                  # turn on IR optimization (default is TRUE)
config$enable_tensorrt_engine(workspace_size,
                              max_batch_size,
                              min_subgraph_size,
                              paddle$AnalysisConfig$Precision$FLOAT32,
                              use_static,
                              use_calib_mode
                              )               # use TensorRT
config$enable_mkldnn()                        # use MKL-DNN
config$delete_pass(pass_name)                 # delete an IR pass
```
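
For example, a hypothetical TensorRT setup — the values here are illustrative only, and TensorRT requires a GPU build of PaddlePaddle with the GPU enabled first:

``` r
config$enable_use_gpu(100L, 0L)             # 100 MB initial GPU memory on device 0
config$enable_tensorrt_engine(1073741824L,  # workspace_size: 1 GB
                              1L,           # max_batch_size
                              3L,           # min_subgraph_size
                              paddle$AnalysisConfig$Precision$FLOAT32,
                              FALSE,        # use_static
                              FALSE)        # use_calib_mode
```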

Create the inference engine:

``` r
predictor <- paddle$create_paddle_predictor(config)
```

Get the input tensor (assuming a single input) and set the input data. Note that R indexes from 1, so the first input name is `input_names[1]`:

``` r
input_names <- predictor$get_input_names()
input_tensor <- predictor$get_input_tensor(input_names[1])
input_shape <- as.integer(c(1, 3, 300, 300))  # the shape must have integer type
input_data <- np_array(data, dtype="float32")$reshape(input_shape)
input_tensor$copy_from_cpu(input_data)
```
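
For a model with several inputs, the same pattern applies to each input name. A sketch, assuming `inputs` is a named list of R arrays prepared by the caller:

``` r
for (name in predictor$get_input_names()) {
  tensor <- predictor$get_input_tensor(name)
  tensor$copy_from_cpu(np_array(inputs[[name]], dtype="float32"))
}
```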

Run inference:

``` r
predictor$zero_copy_run()
```

Get the output tensor (assuming a single output):

``` r
output_names <- predictor$get_output_names()
output_tensor <- predictor$get_output_tensor(output_names[1])
```

Fetch the output data and convert it to a `numpy.ndarray`:

``` r
output_data <- output_tensor$copy_to_cpu()
output_data <- np_array(output_data)
```
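
To work with the result as a plain R vector — for example, to pick the top-1 class of a classifier — a sketch, assuming a classification output:

``` r
scores <- as.vector(output_tensor$copy_to_cpu())  # flatten to an R numeric vector
top1 <- which.max(scores)                         # 1-based index of the highest score
```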

See the full [R mobilenet example](./example/mobilenet.r) and the corresponding [Python mobilenet example](./example/mobilenet.py). For more examples, see the [R inference examples](./example).

## Quick start

Download the [Dockerfile](./Dockerfile) and the [example](./example) directory, then build the Docker image:

``` bash
docker build -t paddle-rapi:latest .
```

Create and enter a container:

``` bash
docker run --rm -it paddle-rapi:latest bash
```

Run the following commands in the container:

``` bash
cd example
chmod +x mobilenet.r
./mobilenet.r
```

example/mobilenet.py
@@ -0,0 +1,52 @@
#!/usr/bin/env python3.7
# pylint: skip-file

import numpy as np

from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import AnalysisPredictor  # type returned by create_paddle_predictor
from paddle.fluid.core import create_paddle_predictor


def main():
    config = set_config()
    predictor = create_paddle_predictor(config)

    data, result = parse_data()  # `result` holds the expected output for comparison

    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_tensor(input_names[0])
    shape = (1, 3, 300, 300)
    input_data = data[:-4].astype(np.float32).reshape(shape)
    input_tensor.copy_from_cpu(input_data)

    predictor.zero_copy_run()

    output_names = predictor.get_output_names()
    output_tensor = predictor.get_output_tensor(output_names[0])
    output_data = output_tensor.copy_to_cpu()


def set_config():
    config = AnalysisConfig("")
    config.set_model("model/__model__", "model/__params__")
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)
    config.enable_profile()

    return config


def parse_data():
    """Parse the input and expected output data."""
    with open('data/data.txt', 'r') as fr:
        data = np.array([float(_) for _ in fr.read().split()])

    with open('data/result.txt', 'r') as fr:
        result = np.array([float(_) for _ in fr.read().split()])

    return (data, result)


if __name__ == "__main__":
    main()

example/mobilenet.r
@@ -0,0 +1,45 @@
#!/usr/bin/env Rscript

library(reticulate)  # call Python from R

use_python("/opt/python3.7/bin/python")

np <- import("numpy")
paddle <- import("paddle.fluid.core")

set_config <- function() {
    config <- paddle$AnalysisConfig("")
    config$set_model("data/model/__model__", "data/model/__params__")
    config$switch_use_feed_fetch_ops(FALSE)
    config$switch_specify_input_names(TRUE)
    config$enable_profile()

    return(config)
}

zero_copy_run_mobilenet <- function() {
    data <- np$loadtxt("data/data.txt")
    data <- data[1:(length(data) - 4)]      # drop the 4 trailing values to match the input shape
    result <- np$loadtxt("data/result.txt")
    result <- result[1:(length(result) - 4)]

    config <- set_config()
    predictor <- paddle$create_paddle_predictor(config)

    input_names <- predictor$get_input_names()
    input_tensor <- predictor$get_input_tensor(input_names[1])
    input_data <- np_array(data, dtype="float32")$reshape(as.integer(c(1, 3, 300, 300)))
    input_tensor$copy_from_cpu(input_data)

    predictor$zero_copy_run()

    output_names <- predictor$get_output_names()
    output_tensor <- predictor$get_output_tensor(output_names[1])
    output_data <- output_tensor$copy_to_cpu()
    output_data <- np_array(output_data)$reshape(as.integer(-1))
    # all.equal(output_data, result)
}

if (!interactive()) {
    zero_copy_run_mobilenet()
}