Merge pull request #12049 from luotao1/demo
add independent inference demo on teamcity
commit 13603774b2

CMakeLists.txt
@@ -0,0 +1,77 @@
cmake_minimum_required(VERSION 3.0)

project(cpp_inference_demo CXX C)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

if(NOT DEFINED PADDLE_LIB)
  message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
  message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)

if(WITH_GPU)
  set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
endif()

include_directories("${PADDLE_LIB}")
include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/snappy/include")
include_directories("${PADDLE_LIB}/third_party/install/snappystream/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")

include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")

link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")

add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)

if(WITH_MKL)
  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so
      ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5.so)
  set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
  if(EXISTS ${MKLDNN_PATH})
    include_directories("${MKLDNN_PATH}/include")
    set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
  endif()
else()
  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
endif()

if(WITH_STATIC_LIB)
  set(DEPS
      "-Wl,--whole-archive"
      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
      "-Wl,--no-whole-archive"
      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
else()
  # Note: libpaddle_inference_api.so must be listed before libpaddle_fluid.so
  set(DEPS
      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
endif()
set(EXTERNAL_LIB "-lrt -ldl -lpthread")

set(DEPS ${DEPS}
    ${MATH_LIB} ${MKLDNN_LIB}
    glog gflags protobuf snappystream snappy z
    ${EXTERNAL_LIB})
if(WITH_GPU)
  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart.so)
endif()

target_link_libraries(${DEMO_NAME} ${DEPS})
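For context, a minimal sketch of how this CMakeLists.txt is typically driven by hand; the PADDLE_LIB path below is a placeholder for wherever your Paddle build produced fluid_install_dir, and the flag values are examples, not requirements:

# Configure and build one demo out of tree.
mkdir -p build && cd build
cmake .. -DPADDLE_LIB=/path/to/Paddle/build/fluid_install_dir \
         -DDEMO_NAME=simple_on_word2vec \
         -DWITH_MKL=ON \
         -DWITH_GPU=OFF \
         -DWITH_STATIC_LIB=ON
make

With WITH_STATIC_LIB=ON, the -Wl,--whole-archive wrapper above forces every object of libpaddle_fluid.a into the binary, so operator registrations done via static initializers are not dropped by the linker; the shared-library branch instead requires both .so files to be on the loader path at runtime.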
run.sh
@@ -0,0 +1,34 @@
set -x
PADDLE_ROOT=$1
WITH_MKL=$2
WITH_GPU=$3
if [ "$3" == "ON" ]; then
  use_gpu_list='true false'
else
  use_gpu_list='false'
fi

mkdir -p build
cd build

for WITH_STATIC_LIB in false; do
  rm -rf *
  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
    -DWITH_MKL=$WITH_MKL \
    -DDEMO_NAME=simple_on_word2vec \
    -DWITH_GPU=$WITH_GPU \
    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
  make
  for use_gpu in $use_gpu_list; do
    ./simple_on_word2vec \
      --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
      --use_gpu=$use_gpu
  done
done
if [ $? -eq 0 ]; then
  exit 0
else
  echo "inference demo run failed."
  exit 1
fi
set +x
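For reference, the script takes PADDLE_ROOT, WITH_MKL, and WITH_GPU as positional arguments. A hypothetical invocation (the path and flag values are examples, not the only valid ones):

# Build the demo against a CPU-only, MKL-enabled Paddle build, then run it
# on the word2vec inference model produced by the Python book tests.
bash run.sh /path/to/Paddle ON OFF

# If the demo was linked against the shared MKLML libraries, they must be
# discoverable at runtime, e.g. (assumed fluid_install_dir layout):
export LD_LIBRARY_PATH=/path/to/Paddle/build/fluid_install_dir/third_party/install/mklml/lib:$LD_LIBRARY_PATH

Note that when WITH_GPU is ON, the inner loop exercises the same binary with both --use_gpu=true and --use_gpu=false, so a single build covers both execution paths.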