@@ -5,12 +5,13 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
 DATA_DIR=$4 # dataset
 TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
 TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir
 
 cd `dirname $0`
 current_dir=`pwd`
 if [ $2 == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
 fi
 if [ $3 == ON ]; then
@@ -55,7 +56,7 @@ cd build
 for WITH_STATIC_LIB in ON OFF; do
   # -----simple_on_word2vec-----
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
     -DWITH_GPU=$TEST_GPU_CPU \
@@ -75,7 +76,7 @@ for WITH_STATIC_LIB in ON OFF; do
   fi
   # ---------vis_demo---------
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=vis_demo \
     -DWITH_GPU=$TEST_GPU_CPU \
@@ -98,7 +99,7 @@ for WITH_STATIC_LIB in ON OFF; do
   # --------tensorrt mobilenet------
   if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
     rm -rf *
-    cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
       -DWITH_MKL=$TURN_ON_MKL \
       -DDEMO_NAME=trt_mobilenet_demo \
       -DWITH_GPU=$TEST_GPU_CPU \
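For context, the patched script reads six positional arguments. A minimal sketch of an invocation, assuming $1 is PADDLE_ROOT and $2 is TURN_ON_MKL (both inferred from how the variables are used above; the script name and all paths below are placeholders):

    # Assumed argument order, inferred from the hunks above:
    # $1=PADDLE_ROOT $2=TURN_ON_MKL $3=TEST_GPU_CPU $4=DATA_DIR
    # $5=TENSORRT_INCLUDE_DIR $6=TENSORRT_LIB_DIR
    bash run.sh /path/to/Paddle ON ON /path/to/dataset \
        /usr/local/TensorRT/include /usr/local/TensorRT/lib

With $2=ON the script prepends the bundled MKLML lib dir to LD_LIBRARY_PATH, and with $3=ON it exercises the GPU build paths as well as CPU; the TensorRT demo only runs when both $USE_TENSORRT and $TEST_GPU_CPU are ON.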